From ddd86436f4e3643c04b797f858dab95d5f2e4de9 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 25 Dec 2015 15:00:15 +0000 Subject: fish --- drivers/gpu/drm/i915/Makefile | 53 + drivers/gpu/drm/i915/dvo.h | 149 + drivers/gpu/drm/i915/dvo_ch7017.c | 414 ++ drivers/gpu/drm/i915/dvo_ch7xxx.c | 344 + drivers/gpu/drm/i915/dvo_ivch.c | 436 ++ drivers/gpu/drm/i915/dvo_ns2501.c | 588 ++ drivers/gpu/drm/i915/dvo_sil164.c | 279 + drivers/gpu/drm/i915/dvo_tfp410.c | 318 + drivers/gpu/drm/i915/i915_debugfs.c | 2165 +++++++ drivers/gpu/drm/i915/i915_dma.c | 1922 ++++++ drivers/gpu/drm/i915/i915_drv.c | 1344 ++++ drivers/gpu/drm/i915/i915_drv.h | 1965 ++++++ drivers/gpu/drm/i915/i915_gem.c | 4525 +++++++++++++ drivers/gpu/drm/i915/i915_gem_context.c | 521 ++ drivers/gpu/drm/i915/i915_gem_debug.c | 187 + drivers/gpu/drm/i915/i915_gem_dmabuf.c | 308 + drivers/gpu/drm/i915/i915_gem_evict.c | 185 + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1224 ++++ drivers/gpu/drm/i915/i915_gem_gtt.c | 830 +++ drivers/gpu/drm/i915/i915_gem_stolen.c | 387 ++ drivers/gpu/drm/i915/i915_gem_tiling.c | 523 ++ drivers/gpu/drm/i915/i915_ioc32.c | 221 + drivers/gpu/drm/i915/i915_irq.c | 3212 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 4835 ++++++++++++++ drivers/gpu/drm/i915/i915_suspend.c | 424 ++ drivers/gpu/drm/i915/i915_sysfs.c | 408 ++ drivers/gpu/drm/i915/i915_trace.h | 455 ++ drivers/gpu/drm/i915/i915_trace_points.c | 13 + drivers/gpu/drm/i915/i915_ums.c | 503 ++ drivers/gpu/drm/i915/intel_acpi.c | 251 + drivers/gpu/drm/i915/intel_bios.c | 771 +++ drivers/gpu/drm/i915/intel_bios.h | 621 ++ drivers/gpu/drm/i915/intel_crt.c | 809 +++ drivers/gpu/drm/i915/intel_ddi.c | 1562 +++++ drivers/gpu/drm/i915/intel_display.c | 9664 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_dp.c | 3010 +++++++++ drivers/gpu/drm/i915/intel_drv.h | 732 +++ drivers/gpu/drm/i915/intel_dvo.c | 533 ++ drivers/gpu/drm/i915/intel_fb.c | 313 + drivers/gpu/drm/i915/intel_hdmi.c | 1107 ++++ drivers/gpu/drm/i915/intel_i2c.c | 615 ++ drivers/gpu/drm/i915/intel_lvds.c | 1291 ++++ drivers/gpu/drm/i915/intel_modes.c | 128 + drivers/gpu/drm/i915/intel_opregion.c | 559 ++ drivers/gpu/drm/i915/intel_overlay.c | 1539 +++++ drivers/gpu/drm/i915/intel_panel.c | 493 ++ drivers/gpu/drm/i915/intel_pm.c | 4657 ++++++++++++++ drivers/gpu/drm/i915/intel_ringbuffer.c | 1931 ++++++ drivers/gpu/drm/i915/intel_ringbuffer.h | 251 + drivers/gpu/drm/i915/intel_sdvo.c | 2871 +++++++++ drivers/gpu/drm/i915/intel_sdvo_regs.h | 730 +++ drivers/gpu/drm/i915/intel_sprite.c | 963 +++ drivers/gpu/drm/i915/intel_tv.c | 1681 +++++ 53 files changed, 65820 insertions(+) create mode 100644 drivers/gpu/drm/i915/Makefile create mode 100644 drivers/gpu/drm/i915/dvo.h create mode 100644 drivers/gpu/drm/i915/dvo_ch7017.c create mode 100644 drivers/gpu/drm/i915/dvo_ch7xxx.c create mode 100644 drivers/gpu/drm/i915/dvo_ivch.c create mode 100644 drivers/gpu/drm/i915/dvo_ns2501.c create mode 100644 drivers/gpu/drm/i915/dvo_sil164.c create mode 100644 drivers/gpu/drm/i915/dvo_tfp410.c create mode 100644 drivers/gpu/drm/i915/i915_debugfs.c create mode 100644 drivers/gpu/drm/i915/i915_dma.c create mode 100644 drivers/gpu/drm/i915/i915_drv.c create mode 100644 drivers/gpu/drm/i915/i915_drv.h create mode 100644 drivers/gpu/drm/i915/i915_gem.c create mode 100644 drivers/gpu/drm/i915/i915_gem_context.c create mode 100644 drivers/gpu/drm/i915/i915_gem_debug.c create mode 100644 drivers/gpu/drm/i915/i915_gem_dmabuf.c create mode 100644 drivers/gpu/drm/i915/i915_gem_evict.c create mode 100644 
drivers/gpu/drm/i915/i915_gem_execbuffer.c create mode 100644 drivers/gpu/drm/i915/i915_gem_gtt.c create mode 100644 drivers/gpu/drm/i915/i915_gem_stolen.c create mode 100644 drivers/gpu/drm/i915/i915_gem_tiling.c create mode 100644 drivers/gpu/drm/i915/i915_ioc32.c create mode 100644 drivers/gpu/drm/i915/i915_irq.c create mode 100644 drivers/gpu/drm/i915/i915_reg.h create mode 100644 drivers/gpu/drm/i915/i915_suspend.c create mode 100644 drivers/gpu/drm/i915/i915_sysfs.c create mode 100644 drivers/gpu/drm/i915/i915_trace.h create mode 100644 drivers/gpu/drm/i915/i915_trace_points.c create mode 100644 drivers/gpu/drm/i915/i915_ums.c create mode 100644 drivers/gpu/drm/i915/intel_acpi.c create mode 100644 drivers/gpu/drm/i915/intel_bios.c create mode 100644 drivers/gpu/drm/i915/intel_bios.h create mode 100644 drivers/gpu/drm/i915/intel_crt.c create mode 100644 drivers/gpu/drm/i915/intel_ddi.c create mode 100644 drivers/gpu/drm/i915/intel_display.c create mode 100644 drivers/gpu/drm/i915/intel_dp.c create mode 100644 drivers/gpu/drm/i915/intel_drv.h create mode 100644 drivers/gpu/drm/i915/intel_dvo.c create mode 100644 drivers/gpu/drm/i915/intel_fb.c create mode 100644 drivers/gpu/drm/i915/intel_hdmi.c create mode 100644 drivers/gpu/drm/i915/intel_i2c.c create mode 100644 drivers/gpu/drm/i915/intel_lvds.c create mode 100644 drivers/gpu/drm/i915/intel_modes.c create mode 100644 drivers/gpu/drm/i915/intel_opregion.c create mode 100644 drivers/gpu/drm/i915/intel_overlay.c create mode 100644 drivers/gpu/drm/i915/intel_panel.c create mode 100644 drivers/gpu/drm/i915/intel_pm.c create mode 100644 drivers/gpu/drm/i915/intel_ringbuffer.c create mode 100644 drivers/gpu/drm/i915/intel_ringbuffer.h create mode 100644 drivers/gpu/drm/i915/intel_sdvo.c create mode 100644 drivers/gpu/drm/i915/intel_sdvo_regs.h create mode 100644 drivers/gpu/drm/i915/intel_sprite.c create mode 100644 drivers/gpu/drm/i915/intel_tv.c (limited to 'drivers/gpu/drm/i915') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile new file mode 100644 index 0000000..644fad3 --- /dev/null +++ b/drivers/gpu/drm/i915/Makefile @@ -0,0 +1,53 @@ +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
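The Makefile body that follows is standard kbuild: every object named in i915-y is compiled and linked into one composite object, and the obj-$(CPTCFG_DRM_I915) line further down turns that into i915.ko (or a built-in object) depending on the config option; conditional pieces are appended the same way (i915-$(CONFIG_COMPAT) += i915_ioc32.o). As a sketch of the convention (the file name below is hypothetical, not part of this patch), wiring in one more source file would only take:

    i915-y += intel_newfeature.o	# hypothetical example: built from intel_newfeature.c, linked into i915.ko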
+ +ccflags-y := -Iinclude/drm +i915-y := i915_drv.o i915_dma.o i915_irq.o \ + i915_debugfs.o \ + i915_suspend.o \ + i915_gem.o \ + i915_gem_context.o \ + i915_gem_debug.o \ + i915_gem_evict.o \ + i915_gem_execbuffer.o \ + i915_gem_gtt.o \ + i915_gem_stolen.o \ + i915_gem_tiling.o \ + i915_sysfs.o \ + i915_trace_points.o \ + i915_ums.o \ + intel_display.o \ + intel_crt.o \ + intel_lvds.o \ + intel_bios.o \ + intel_ddi.o \ + intel_dp.o \ + intel_hdmi.o \ + intel_sdvo.o \ + intel_modes.o \ + intel_panel.o \ + intel_pm.o \ + intel_i2c.o \ + intel_fb.o \ + intel_tv.o \ + intel_dvo.o \ + intel_ringbuffer.o \ + intel_overlay.o \ + intel_sprite.o \ + intel_opregion.o \ + dvo_ch7xxx.o \ + dvo_ch7017.o \ + dvo_ivch.o \ + dvo_tfp410.o \ + dvo_sil164.o \ + dvo_ns2501.o \ + i915_gem_dmabuf.o + +i915-$(CONFIG_COMPAT) += i915_ioc32.o + +i915-$(CONFIG_ACPI) += intel_acpi.o + +obj-$(CPTCFG_DRM_I915) += i915.o + +CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h new file mode 100644 index 0000000..33a62ad --- /dev/null +++ b/drivers/gpu/drm/i915/dvo.h @@ -0,0 +1,149 @@ +/* + * Copyright © 2006 Eric Anholt + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _INTEL_DVO_H +#define _INTEL_DVO_H + +#include <linux/i2c.h> +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include "intel_drv.h" + +struct intel_dvo_device { + const char *name; + int type; + /* DVOA/B/C output register */ + u32 dvo_reg; + /* GPIO register used for i2c bus to control this device */ + u32 gpio; + int slave_addr; + + const struct intel_dvo_dev_ops *dev_ops; + void *dev_priv; + struct i2c_adapter *i2c_bus; +}; + +struct intel_dvo_dev_ops { + /* + * Initialize the device at startup time. + * Returns false if the device does not exist. + */ + bool (*init)(struct intel_dvo_device *dvo, + struct i2c_adapter *i2cbus); + + /* + * Called to allow the output a chance to create properties after the + * RandR objects have been created. + */ + void (*create_resources)(struct intel_dvo_device *dvo); + + /* + * Turn on/off output. + * + * Because none of our dvo drivers support intermediate power levels, + * we don't expose this in the interface. + */ + void (*dpms)(struct intel_dvo_device *dvo, bool enable); + + /* + * Callback for testing a video mode for a given output.
+ * + * This function should only check for cases where a mode can't + * be supported on the output specifically, and not represent + * generic CRTC limitations. + * + * \return MODE_OK if the mode is valid, or another MODE_* otherwise. + */ + int (*mode_valid)(struct intel_dvo_device *dvo, + struct drm_display_mode *mode); + + /* + * Callback to adjust the mode to be set in the CRTC. + * + * This allows an output to adjust the clock or even the entire set of + * timings, which is used for panels with fixed timings or for + * buses with clock limitations. + */ + bool (*mode_fixup)(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /* + * Callback for preparing mode changes on an output + */ + void (*prepare)(struct intel_dvo_device *dvo); + + /* + * Callback for committing mode changes on an output + */ + void (*commit)(struct intel_dvo_device *dvo); + + /* + * Callback for setting up a video mode after fixups have been made. + * + * This is only called while the output is disabled. The dpms callback + * must be all that's necessary for the output, to turn the output on + * after this function is called. + */ + void (*mode_set)(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /* + * Probe for a connected output, and return detect_status. + */ + enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); + + /* + * Probe the current hw status, returning true if the connected output + * is active. + */ + bool (*get_hw_state)(struct intel_dvo_device *dev); + + /** + * Query the device for the modes it provides. + * + * This function may also update MonInfo, mm_width, and mm_height. + * + * \return singly-linked list of modes or NULL if no modes found. + */ + struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); + + /** + * Clean up driver-specific bits of the output + */ + void (*destroy) (struct intel_dvo_device *dvo); + + /** + * Debugging hook to dump device registers to log file + */ + void (*dump_regs)(struct intel_dvo_device *dvo); +}; + +extern struct intel_dvo_dev_ops sil164_ops; +extern struct intel_dvo_dev_ops ch7xxx_ops; +extern struct intel_dvo_dev_ops ivch_ops; +extern struct intel_dvo_dev_ops tfp410_ops; +extern struct intel_dvo_dev_ops ch7017_ops; +extern struct intel_dvo_dev_ops ns2501_ops; + +#endif /* _INTEL_DVO_H */ diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c new file mode 100644 index 0000000..86b27d1 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -0,0 +1,414 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include "dvo.h" + +#define CH7017_TV_DISPLAY_MODE 0x00 +#define CH7017_FLICKER_FILTER 0x01 +#define CH7017_VIDEO_BANDWIDTH 0x02 +#define CH7017_TEXT_ENHANCEMENT 0x03 +#define CH7017_START_ACTIVE_VIDEO 0x04 +#define CH7017_HORIZONTAL_POSITION 0x05 +#define CH7017_VERTICAL_POSITION 0x06 +#define CH7017_BLACK_LEVEL 0x07 +#define CH7017_CONTRAST_ENHANCEMENT 0x08 +#define CH7017_TV_PLL 0x09 +#define CH7017_TV_PLL_M 0x0a +#define CH7017_TV_PLL_N 0x0b +#define CH7017_SUB_CARRIER_0 0x0c +#define CH7017_CIV_CONTROL 0x10 +#define CH7017_CIV_0 0x11 +#define CH7017_CHROMA_BOOST 0x14 +#define CH7017_CLOCK_MODE 0x1c +#define CH7017_INPUT_CLOCK 0x1d +#define CH7017_GPIO_CONTROL 0x1e +#define CH7017_INPUT_DATA_FORMAT 0x1f +#define CH7017_CONNECTION_DETECT 0x20 +#define CH7017_DAC_CONTROL 0x21 +#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22 +#define CH7017_DEFEAT_VSYNC 0x47 +#define CH7017_TEST_PATTERN 0x48 + +#define CH7017_POWER_MANAGEMENT 0x49 +/** Enables the TV output path. */ +#define CH7017_TV_EN (1 << 0) +#define CH7017_DAC0_POWER_DOWN (1 << 1) +#define CH7017_DAC1_POWER_DOWN (1 << 2) +#define CH7017_DAC2_POWER_DOWN (1 << 3) +#define CH7017_DAC3_POWER_DOWN (1 << 4) +/** Powers down the TV out block, and DAC0-3 */ +#define CH7017_TV_POWER_DOWN_EN (1 << 5) + +#define CH7017_VERSION_ID 0x4a + +#define CH7017_DEVICE_ID 0x4b +#define CH7017_DEVICE_ID_VALUE 0x1b +#define CH7018_DEVICE_ID_VALUE 0x1a +#define CH7019_DEVICE_ID_VALUE 0x19 + +#define CH7017_XCLK_D2_ADJUST 0x53 +#define CH7017_UP_SCALER_COEFF_0 0x55 +#define CH7017_UP_SCALER_COEFF_1 0x56 +#define CH7017_UP_SCALER_COEFF_2 0x57 +#define CH7017_UP_SCALER_COEFF_3 0x58 +#define CH7017_UP_SCALER_COEFF_4 0x59 +#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a +#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b +#define CH7017_GPIO_INVERT 0x5c +#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d +#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e + +#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f +/**< Low bits of horizontal active pixel input */ + +#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60 +/** High bits of horizontal active pixel input */ +#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0) +/** High bits of vertical active line output */ +#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3) + +#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61 +/**< Low bits of vertical active line output */ + +#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62 +/**< Low bits of horizontal active pixel output */ + +#define CH7017_LVDS_POWER_DOWN 0x63 +/** High bits of horizontal active pixel output */ +#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0) +/** Enables the LVDS power down state transition */ +#define CH7017_LVDS_POWER_DOWN_EN (1 << 6) +/** Enables the LVDS upscaler */ +#define CH7017_LVDS_UPSCALER_EN (1 << 7) +#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08 + +#define CH7017_LVDS_ENCODING 0x64 +#define CH7017_LVDS_DITHER_2D (1 << 2) +#define CH7017_LVDS_DITHER_DIS (1 << 3) +#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4) +#define CH7017_LVDS_24_BIT (1 << 5) + +#define CH7017_LVDS_ENCODING_2 0x65 + +#define CH7017_LVDS_PLL_CONTROL 0x66 +/** Enables the LVDS panel output path */ +#define CH7017_LVDS_PANEN (1 << 0) +/** Enables the LVDS panel backlight */ +#define 
CH7017_LVDS_BKLEN (1 << 3) + +#define CH7017_POWER_SEQUENCING_T1 0x67 +#define CH7017_POWER_SEQUENCING_T2 0x68 +#define CH7017_POWER_SEQUENCING_T3 0x69 +#define CH7017_POWER_SEQUENCING_T4 0x6a +#define CH7017_POWER_SEQUENCING_T5 0x6b +#define CH7017_GPIO_DRIVER_TYPE 0x6c +#define CH7017_GPIO_DATA 0x6d +#define CH7017_GPIO_DIRECTION_CONTROL 0x6e + +#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71 +# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4 +# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0 +# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80 + +#define CH7017_LVDS_PLL_VCO_CONTROL 0x72 +# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80 +# define CH7017_LVDS_PLL_VCO_SHIFT 4 +# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0 + +#define CH7017_OUTPUTS_ENABLE 0x73 +# define CH7017_CHARGE_PUMP_LOW 0x0 +# define CH7017_CHARGE_PUMP_HIGH 0x3 +# define CH7017_LVDS_CHANNEL_A (1 << 3) +# define CH7017_LVDS_CHANNEL_B (1 << 4) +# define CH7017_TV_DAC_A (1 << 5) +# define CH7017_TV_DAC_B (1 << 6) +# define CH7017_DDC_SELECT_DC2 (1 << 7) + +#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74 +#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75 +#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76 + +#define CH7017_LVDS_CONTROL_2 0x78 +# define CH7017_LOOP_FILTER_SHIFT 5 +# define CH7017_PHASE_DETECTOR_SHIFT 0 + +#define CH7017_BANG_LIMIT_CONTROL 0x7f + +struct ch7017_priv { + uint8_t dummy; +}; + +static void ch7017_dump_regs(struct intel_dvo_device *dvo); +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); + +static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) +{ + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = val, + } + }; + return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2; +} + +static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) +{ + uint8_t buf[2] = { addr, val }; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = buf, + }; + return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; +} + +/** Probes for a CH7017 on the given bus and slave address. 
*/ +static bool ch7017_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + struct ch7017_priv *priv; + const char *str; + u8 val; + + priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); + if (priv == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = priv; + + if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) + goto fail; + + switch (val) { + case CH7017_DEVICE_ID_VALUE: + str = "ch7017"; + break; + case CH7018_DEVICE_ID_VALUE: + str = "ch7018"; + break; + case CH7019_DEVICE_ID_VALUE: + str = "ch7019"; + break; + default: + DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " + "slave %d.\n", + val, adapter->name, dvo->slave_addr); + goto fail; + } + + DRM_DEBUG_KMS("%s detected on %s, addr %d\n", + str, adapter->name, dvo->slave_addr); + return true; + +fail: + kfree(priv); + return false; +} + +static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) +{ + return connector_status_connected; +} + +static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 160000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void ch7017_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; + uint8_t outputs_enable, lvds_control_2, lvds_power_down; + uint8_t horizontal_active_pixel_input; + uint8_t horizontal_active_pixel_output, vertical_active_line_output; + uint8_t active_input_line_output; + + DRM_DEBUG_KMS("Registers before mode setting\n"); + ch7017_dump_regs(dvo); + + /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ + if (mode->clock < 100000) { + outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW; + lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | + (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_VCO_SHIFT) | + (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) | + (0 << CH7017_PHASE_DETECTOR_SHIFT); + } else { + outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH; + lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | + (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); + lvds_pll_feedback_div = 35; + lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) | + (0 << CH7017_PHASE_DETECTOR_SHIFT); + if (1) { /* XXX: dual channel panel detection. Assume yes for now. 
*/ + outputs_enable |= CH7017_LVDS_CHANNEL_B; + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_VCO_SHIFT) | + (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + } else { + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (1 << CH7017_LVDS_PLL_VCO_SHIFT) | + (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + } + } + + horizontal_active_pixel_input = mode->hdisplay & 0x00ff; + + vertical_active_line_output = mode->vdisplay & 0x00ff; + horizontal_active_pixel_output = mode->hdisplay & 0x00ff; + + active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) | + (((mode->vdisplay & 0x0700) >> 8) << 3); + + lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | + (mode->hdisplay & 0x0700) >> 8; + + ch7017_dpms(dvo, false); + ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, + horizontal_active_pixel_input); + ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, + horizontal_active_pixel_output); + ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, + vertical_active_line_output); + ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, + active_input_line_output); + ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control); + ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div); + ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2); + ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable); + + /* Turn the LVDS back on with new settings. */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); + + DRM_DEBUG_KMS("Registers after mode setting\n"); + ch7017_dump_regs(dvo); +} + +/* set the CH7017 power state */ +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) +{ + uint8_t val; + + ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); + + /* Turn off TV/VGA, and never turn it on since we don't support it. 
*/ + ch7017_write(dvo, CH7017_POWER_MANAGEMENT, + CH7017_DAC0_POWER_DOWN | + CH7017_DAC1_POWER_DOWN | + CH7017_DAC2_POWER_DOWN | + CH7017_DAC3_POWER_DOWN | + CH7017_TV_POWER_DOWN_EN); + + if (enable) { + /* Turn on the LVDS */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, + val & ~CH7017_LVDS_POWER_DOWN_EN); + } else { + /* Turn off the LVDS */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, + val | CH7017_LVDS_POWER_DOWN_EN); + } + + /* XXX: Should actually wait for update power status somehow */ + msleep(20); +} + +static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) +{ + uint8_t val; + + ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); + + if (val & CH7017_LVDS_POWER_DOWN_EN) + return false; + else + return true; +} + +static void ch7017_dump_regs(struct intel_dvo_device *dvo) +{ + uint8_t val; + +#define DUMP(reg) \ +do { \ + ch7017_read(dvo, reg, &val); \ + DRM_DEBUG_KMS(#reg ": %02x\n", val); \ +} while (0) + + DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); + DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT); + DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT); + DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT); + DUMP(CH7017_LVDS_PLL_VCO_CONTROL); + DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV); + DUMP(CH7017_LVDS_CONTROL_2); + DUMP(CH7017_OUTPUTS_ENABLE); + DUMP(CH7017_LVDS_POWER_DOWN); +} + +static void ch7017_destroy(struct intel_dvo_device *dvo) +{ + struct ch7017_priv *priv = dvo->dev_priv; + + if (priv) { + kfree(priv); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops ch7017_ops = { + .init = ch7017_init, + .detect = ch7017_detect, + .mode_valid = ch7017_mode_valid, + .mode_set = ch7017_mode_set, + .dpms = ch7017_dpms, + .get_hw_state = ch7017_get_hw_state, + .dump_regs = ch7017_dump_regs, + .destroy = ch7017_destroy, +}; diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c new file mode 100644 index 0000000..3edd981 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -0,0 +1,344 @@ +/************************************************************************** + +Copyright © 2006 Dave Airlie + +All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sub license, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
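A note on the pattern that just concluded above: each dvo_*.c file in this patch exports exactly one vtable like ch7017_ops, matching the extern declarations at the bottom of dvo.h, and the common encoder code binds such a vtable to a struct intel_dvo_device describing where the chip lives. A minimal, illustrative entry using only the fields declared in dvo.h (the register offset and i2c address here are placeholders, not values taken from this patch):

    static const struct intel_dvo_device example_dvo = {
    	.name       = "ch7017",
    	.dvo_reg    = 0x61160,		/* DVO port control register; placeholder value */
    	.slave_addr = 0x75,		/* i2c slave address; placeholder value */
    	.dev_ops    = &ch7017_ops,	/* the vtable defined above */
    };

The real table of this kind lives in intel_dvo.c elsewhere in this patch, which sets up the i2c bus for each entry and calls dev_ops->init() to probe for the chip before registering an encoder.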
+ +**************************************************************************/ + +#include "dvo.h" + +#define CH7xxx_REG_VID 0x4a +#define CH7xxx_REG_DID 0x4b + +#define CH7011_VID 0x83 /* 7010 as well */ +#define CH7009A_VID 0x84 +#define CH7009B_VID 0x85 +#define CH7301_VID 0x95 + +#define CH7xxx_VID 0x84 +#define CH7xxx_DID 0x17 + +#define CH7xxx_NUM_REGS 0x4c + +#define CH7xxx_CM 0x1c +#define CH7xxx_CM_XCM (1<<0) +#define CH7xxx_CM_MCP (1<<2) +#define CH7xxx_INPUT_CLOCK 0x1d +#define CH7xxx_GPIO 0x1e +#define CH7xxx_GPIO_HPIR (1<<3) +#define CH7xxx_IDF 0x1f + +#define CH7xxx_IDF_HSP (1<<3) +#define CH7xxx_IDF_VSP (1<<4) + +#define CH7xxx_CONNECTION_DETECT 0x20 +#define CH7xxx_CDET_DVI (1<<5) + +#define CH7301_DAC_CNTL 0x21 +#define CH7301_HOTPLUG 0x23 +#define CH7xxx_TCTL 0x31 +#define CH7xxx_TVCO 0x32 +#define CH7xxx_TPCP 0x33 +#define CH7xxx_TPD 0x34 +#define CH7xxx_TPVT 0x35 +#define CH7xxx_TLPF 0x36 +#define CH7xxx_TCT 0x37 +#define CH7301_TEST_PATTERN 0x48 + +#define CH7xxx_PM 0x49 +#define CH7xxx_PM_FPD (1<<0) +#define CH7301_PM_DACPD0 (1<<1) +#define CH7301_PM_DACPD1 (1<<2) +#define CH7301_PM_DACPD2 (1<<3) +#define CH7xxx_PM_DVIL (1<<6) +#define CH7xxx_PM_DVIP (1<<7) + +#define CH7301_SYNC_POLARITY 0x56 +#define CH7301_SYNC_RGB_YUV (1<<0) +#define CH7301_SYNC_POL_DVI (1<<5) + +/** @file + * driver for the Chrontel 7xxx DVI chip over DVO. + */ + +static struct ch7xxx_id_struct { + uint8_t vid; + char *name; +} ch7xxx_ids[] = { + { CH7011_VID, "CH7011" }, + { CH7009A_VID, "CH7009A" }, + { CH7009B_VID, "CH7009B" }, + { CH7301_VID, "CH7301" }, +}; + +struct ch7xxx_priv { + bool quiet; +}; + +static char *ch7xxx_get_id(uint8_t vid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) { + if (ch7xxx_ids[i].vid == vid) + return ch7xxx_ids[i].name; + } + + return NULL; +} + +/** Reads an 8 bit register */ +static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + }; + + if (!ch7xxx->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +/** Writes an 8 bit register */ +static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + uint8_t out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!ch7xxx->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +static bool ch7xxx_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the CH7xxx chip on the specified i2c bus */ + struct ch7xxx_priv *ch7xxx; + uint8_t vendor, device; + char *name; + + ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); + if (ch7xxx == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = ch7xxx; + ch7xxx->quiet = true; + + if 
(!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor)) + goto out; + + name = ch7xxx_get_id(vendor); + if (!name) { + DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " + "slave %d.\n", + vendor, adapter->name, dvo->slave_addr); + goto out; + } + + + if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) + goto out; + + if (device != CH7xxx_DID) { + DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s " + "slave %d.\n", + vendor, adapter->name, dvo->slave_addr); + goto out; + } + + ch7xxx->quiet = false; + DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", + name, vendor, device); + return true; +out: + kfree(ch7xxx); + return false; +} + +static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) +{ + uint8_t cdet, orig_pm, pm; + + ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); + + pm = orig_pm; + pm &= ~CH7xxx_PM_FPD; + pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP; + + ch7xxx_writeb(dvo, CH7xxx_PM, pm); + + ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet); + + ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm); + + if (cdet & CH7xxx_CDET_DVI) + return connector_status_connected; + return connector_status_disconnected; +} + +static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 165000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void ch7xxx_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + uint8_t tvco, tpcp, tpd, tlpf, idf; + + if (mode->clock <= 65000) { + tvco = 0x23; + tpcp = 0x08; + tpd = 0x16; + tlpf = 0x60; + } else { + tvco = 0x2d; + tpcp = 0x06; + tpd = 0x26; + tlpf = 0xa0; + } + + ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00); + ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco); + ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp); + ch7xxx_writeb(dvo, CH7xxx_TPD, tpd); + ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30); + ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf); + ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00); + + ch7xxx_readb(dvo, CH7xxx_IDF, &idf); + + idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP); + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + idf |= CH7xxx_IDF_HSP; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + idf |= CH7xxx_IDF_HSP; + + ch7xxx_writeb(dvo, CH7xxx_IDF, idf); +} + +/* set the CH7xxx power state */ +static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) +{ + if (enable) + ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); + else + ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); +} + +static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) +{ + u8 val; + + ch7xxx_readb(dvo, CH7xxx_PM, &val); + + if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP)) + return true; + else + return false; +} + +static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) +{ + int i; + + for (i = 0; i < CH7xxx_NUM_REGS; i++) { + uint8_t val; + if ((i % 8) == 0) + DRM_LOG_KMS("\n %02X: ", i); + ch7xxx_readb(dvo, i, &val); + DRM_LOG_KMS("%02X ", val); + } +} + +static void ch7xxx_destroy(struct intel_dvo_device *dvo) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + + if (ch7xxx) { + kfree(ch7xxx); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops ch7xxx_ops = { + .init = ch7xxx_init, + .detect = ch7xxx_detect, + .mode_valid = ch7xxx_mode_valid, + .mode_set = ch7xxx_mode_set, + .dpms = ch7xxx_dpms, + .get_hw_state = ch7xxx_get_hw_state, + .dump_regs = ch7xxx_dump_regs, + .destroy = ch7xxx_destroy, +}; diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c new file mode 100644 index 0000000..baaf65b --- /dev/null +++ 
b/drivers/gpu/drm/i915/dvo_ivch.c @@ -0,0 +1,436 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include "dvo.h" + +/* + * register definitions for the i82807aa. + * + * Documentation on this chipset can be found in datasheet #29069001 at + * intel.com. + */ + +/* + * VCH Revision & GMBus Base Addr + */ +#define VR00 0x00 +# define VR00_BASE_ADDRESS_MASK 0x007f + +/* + * Functionality Enable + */ +#define VR01 0x01 + +/* + * Enable the panel fitter + */ +# define VR01_PANEL_FIT_ENABLE (1 << 3) +/* + * Enables the LCD display. + * + * This must not be set while VR01_DVO_BYPASS_ENABLE is set. + */ +# define VR01_LCD_ENABLE (1 << 2) +/** Enables the DVO repeater. */ +# define VR01_DVO_BYPASS_ENABLE (1 << 1) +/** Enables the DVO clock */ +# define VR01_DVO_ENABLE (1 << 0) + +/* + * LCD Interface Format + */ +#define VR10 0x10 +/** Enables LVDS output instead of CMOS */ +# define VR10_LVDS_ENABLE (1 << 4) +/** Enables 18-bit LVDS output. */ +# define VR10_INTERFACE_1X18 (0 << 2) +/** Enables 24-bit LVDS or CMOS output */ +# define VR10_INTERFACE_1X24 (1 << 2) +/** Enables 2x18-bit LVDS or CMOS output. */ +# define VR10_INTERFACE_2X18 (2 << 2) +/** Enables 2x24-bit LVDS output */ +# define VR10_INTERFACE_2X24 (3 << 2) + +/* + * VR20 LCD Horizontal Display Size + */ +#define VR20 0x20 + +/* + * LCD Vertical Display Size + */ +#define VR21 0x20 + +/* + * Panel power down status + */ +#define VR30 0x30 +/** Read only bit indicating that the panel is not in a safe poweroff state. 
*/ +# define VR30_PANEL_ON (1 << 15) + +#define VR40 0x40 +# define VR40_STALL_ENABLE (1 << 13) +# define VR40_VERTICAL_INTERP_ENABLE (1 << 12) +# define VR40_ENHANCED_PANEL_FITTING (1 << 11) +# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10) +# define VR40_AUTO_RATIO_ENABLE (1 << 9) +# define VR40_CLOCK_GATING_ENABLE (1 << 8) + +/* + * Panel Fitting Vertical Ratio + * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2 + */ +#define VR41 0x41 + +/* + * Panel Fitting Horizontal Ratio + * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2 + */ +#define VR42 0x42 + +/* + * Horizontal Image Size + */ +#define VR43 0x43 + +/* VR80 GPIO 0 + */ +#define VR80 0x80 +#define VR81 0x81 +#define VR82 0x82 +#define VR83 0x83 +#define VR84 0x84 +#define VR85 0x85 +#define VR86 0x86 +#define VR87 0x87 + +/* VR88 GPIO 8 + */ +#define VR88 0x88 + +/* Graphics BIOS scratch 0 + */ +#define VR8E 0x8E +# define VR8E_PANEL_TYPE_MASK (0xf << 0) +# define VR8E_PANEL_INTERFACE_CMOS (0 << 4) +# define VR8E_PANEL_INTERFACE_LVDS (1 << 4) +# define VR8E_FORCE_DEFAULT_PANEL (1 << 5) + +/* Graphics BIOS scratch 1 + */ +#define VR8F 0x8F +# define VR8F_VCH_PRESENT (1 << 0) +# define VR8F_DISPLAY_CONN (1 << 1) +# define VR8F_POWER_MASK (0x3c) +# define VR8F_POWER_POS (2) + + +struct ivch_priv { + bool quiet; + + uint16_t width, height; +}; + + +static void ivch_dump_regs(struct intel_dvo_device *dvo); + +/** + * Reads a register on the ivch. + * + * Each of the 256 registers are 16 bits long. + */ +static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) +{ + struct ivch_priv *priv = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[1]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 0, + }, + { + .addr = 0, + .flags = I2C_M_NOSTART, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD | I2C_M_NOSTART, + .len = 2, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + + if (i2c_transfer(adapter, msgs, 3) == 3) { + *data = (in_buf[1] << 8) | in_buf[0]; + return true; + }; + + if (!priv->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from " + "%s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +/** Writes a 16-bit register on the ivch */ +static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) +{ + struct ivch_priv *priv = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[3]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 3, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = data & 0xff; + out_buf[2] = data >> 8; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!priv->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/** Probes the given bus and slave address for an ivch */ +static bool ivch_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + struct ivch_priv *priv; + uint16_t temp; + + priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); + if (priv == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = priv; + priv->quiet = true; + + if (!ivch_read(dvo, VR00, &temp)) + goto out; + priv->quiet = false; + + /* Since the identification bits are probably zeroes, which doesn't seem + * very unique, check that the value in the base address field matches + * the address it's responding on. 
+ */ + if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { + DRM_DEBUG_KMS("ivch detect failed due to address mismatch " + "(%d vs %d)\n", + (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); + goto out; + } + + ivch_read(dvo, VR20, &priv->width); + ivch_read(dvo, VR21, &priv->height); + + return true; + +out: + kfree(priv); + return false; +} + +static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo) +{ + return connector_status_connected; +} + +static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 112000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +/** Sets the power state of the panel connected to the ivch */ +static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) +{ + int i; + uint16_t vr01, vr30, backlight; + + /* Set the new power state of the panel. */ + if (!ivch_read(dvo, VR01, &vr01)) + return; + + if (enable) + backlight = 1; + else + backlight = 0; + ivch_write(dvo, VR80, backlight); + + if (enable) + vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; + else + vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); + + ivch_write(dvo, VR01, vr01); + + /* Wait for the panel to make its state transition */ + for (i = 0; i < 100; i++) { + if (!ivch_read(dvo, VR30, &vr30)) + break; + + if (((vr30 & VR30_PANEL_ON) != 0) == enable) + break; + udelay(1000); + } + /* wait some more; vch may fail to resync sometimes without this */ + udelay(16 * 1000); +} + +static bool ivch_get_hw_state(struct intel_dvo_device *dvo) +{ + uint16_t vr01; + + /* Set the new power state of the panel. */ + if (!ivch_read(dvo, VR01, &vr01)) + return false; + + if (vr01 & VR01_LCD_ENABLE) + return true; + else + return false; +} + +static void ivch_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + uint16_t vr40 = 0; + uint16_t vr01; + + vr01 = 0; + vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | + VR40_HORIZONTAL_INTERP_ENABLE); + + if (mode->hdisplay != adjusted_mode->hdisplay || + mode->vdisplay != adjusted_mode->vdisplay) { + uint16_t x_ratio, y_ratio; + + vr01 |= VR01_PANEL_FIT_ENABLE; + vr40 |= VR40_CLOCK_GATING_ENABLE; + x_ratio = (((mode->hdisplay - 1) << 16) / + (adjusted_mode->hdisplay - 1)) >> 2; + y_ratio = (((mode->vdisplay - 1) << 16) / + (adjusted_mode->vdisplay - 1)) >> 2; + ivch_write(dvo, VR42, x_ratio); + ivch_write(dvo, VR41, y_ratio); + } else { + vr01 &= ~VR01_PANEL_FIT_ENABLE; + vr40 &= ~VR40_CLOCK_GATING_ENABLE; + } + vr40 &= ~VR40_AUTO_RATIO_ENABLE; + + ivch_write(dvo, VR01, vr01); + ivch_write(dvo, VR40, vr40); + + ivch_dump_regs(dvo); +} + +static void ivch_dump_regs(struct intel_dvo_device *dvo) +{ + uint16_t val; + + ivch_read(dvo, VR00, &val); + DRM_LOG_KMS("VR00: 0x%04x\n", val); + ivch_read(dvo, VR01, &val); + DRM_LOG_KMS("VR01: 0x%04x\n", val); + ivch_read(dvo, VR30, &val); + DRM_LOG_KMS("VR30: 0x%04x\n", val); + ivch_read(dvo, VR40, &val); + DRM_LOG_KMS("VR40: 0x%04x\n", val); + + /* GPIO registers */ + ivch_read(dvo, VR80, &val); + DRM_LOG_KMS("VR80: 0x%04x\n", val); + ivch_read(dvo, VR81, &val); + DRM_LOG_KMS("VR81: 0x%04x\n", val); + ivch_read(dvo, VR82, &val); + DRM_LOG_KMS("VR82: 0x%04x\n", val); + ivch_read(dvo, VR83, &val); + DRM_LOG_KMS("VR83: 0x%04x\n", val); + ivch_read(dvo, VR84, &val); + DRM_LOG_KMS("VR84: 0x%04x\n", val); + ivch_read(dvo, VR85, &val); + DRM_LOG_KMS("VR85: 0x%04x\n", val); + ivch_read(dvo, VR86, &val); + DRM_LOG_KMS("VR86: 0x%04x\n", val); + ivch_read(dvo, VR87, &val); + 
DRM_LOG_KMS("VR87: 0x%04x\n", val); + ivch_read(dvo, VR88, &val); + DRM_LOG_KMS("VR88: 0x%04x\n", val); + + /* Scratch register 0 - AIM Panel type */ + ivch_read(dvo, VR8E, &val); + DRM_LOG_KMS("VR8E: 0x%04x\n", val); + + /* Scratch register 1 - Status register */ + ivch_read(dvo, VR8F, &val); + DRM_LOG_KMS("VR8F: 0x%04x\n", val); +} + +static void ivch_destroy(struct intel_dvo_device *dvo) +{ + struct ivch_priv *priv = dvo->dev_priv; + + if (priv) { + kfree(priv); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops ivch_ops = { + .init = ivch_init, + .dpms = ivch_dpms, + .get_hw_state = ivch_get_hw_state, + .mode_valid = ivch_mode_valid, + .mode_set = ivch_mode_set, + .detect = ivch_detect, + .dump_regs = ivch_dump_regs, + .destroy = ivch_destroy, +}; diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c new file mode 100644 index 0000000..c4a255b --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_ns2501.c @@ -0,0 +1,588 @@ +/* + * + * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "dvo.h" +#include "i915_reg.h" +#include "i915_drv.h" + +#define NS2501_VID 0x1305 +#define NS2501_DID 0x6726 + +#define NS2501_VID_LO 0x00 +#define NS2501_VID_HI 0x01 +#define NS2501_DID_LO 0x02 +#define NS2501_DID_HI 0x03 +#define NS2501_REV 0x04 +#define NS2501_RSVD 0x05 +#define NS2501_FREQ_LO 0x06 +#define NS2501_FREQ_HI 0x07 + +#define NS2501_REG8 0x08 +#define NS2501_8_VEN (1<<5) +#define NS2501_8_HEN (1<<4) +#define NS2501_8_DSEL (1<<3) +#define NS2501_8_BPAS (1<<2) +#define NS2501_8_RSVD (1<<1) +#define NS2501_8_PD (1<<0) + +#define NS2501_REG9 0x09 +#define NS2501_9_VLOW (1<<7) +#define NS2501_9_MSEL_MASK (0x7<<4) +#define NS2501_9_TSEL (1<<3) +#define NS2501_9_RSEN (1<<2) +#define NS2501_9_RSVD (1<<1) +#define NS2501_9_MDI (1<<0) + +#define NS2501_REGC 0x0c + +struct ns2501_priv { + //I2CDevRec d; + bool quiet; + int reg_8_shadow; + int reg_8_set; + // Shadow registers for i915 + int dvoc; + int pll_a; + int srcdim; + int fw_blc; +}; + +#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) + +/* + * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens + * laptops does not react on the i2c bus unless + * both the PLL is running and the display is configured in its native + * resolution. + * This function forces the DVO on, and stores the registers it touches. 
+ * Afterwards, registers are restored to regular values. + * + * This is pretty much a hack, though it works. + * Without that, ns2501_readb and ns2501_writeb fail + * when switching the resolution. + */ + +static void enable_dvo(struct intel_dvo_device *dvo) +{ + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__); + + ns->dvoc = I915_READ(DVO_C); + ns->pll_a = I915_READ(_DPLL_A); + ns->srcdim = I915_READ(DVOC_SRCDIM); + ns->fw_blc = I915_READ(FW_BLC); + + I915_WRITE(DVOC, 0x10004084); + I915_WRITE(_DPLL_A, 0xd0820000); + I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 + I915_WRITE(FW_BLC, 0x1080304); + + I915_WRITE(DVOC, 0x90004084); +} + +/* + * Restore the I915 registers modified by the above + * trigger function. + */ +static void restore_dvo(struct intel_dvo_device *dvo) +{ + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + + I915_WRITE(DVOC, ns->dvoc); + I915_WRITE(_DPLL_A, ns->pll_a); + I915_WRITE(DVOC_SRCDIM, ns->srcdim); + I915_WRITE(FW_BLC, ns->fw_blc); +} + +/* +** Read a register from the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + }; + + if (!ns->quiet) { + DRM_DEBUG_KMS + ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, + adapter->name, dvo->slave_addr); + } + + return false; +} + +/* +** Write a register to the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + uint8_t out_buf[2]; + + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) { + return true; + } + + if (!ns->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* National Semiconductor 2501 driver for chip on i2c bus + * scan for the chip on the bus. + * Hope the VBIOS initialized the PLL correctly so we can + * talk to it. If not, it will not be seen and not detected. + * Bummer! 
+ */ +static bool ns2501_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the NS2501 chip on the specified i2c bus */ + struct ns2501_priv *ns; + unsigned char ch; + + ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); + if (ns == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = ns; + ns->quiet = true; + + if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) + goto out; + + if (ch != (NS2501_VID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + + if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) + goto out; + + if (ch != (NS2501_DID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + ns->quiet = false; + ns->reg_8_set = 0; + ns->reg_8_shadow = + NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN; + + DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); + return true; + +out: + kfree(ns); + return false; +} + +static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) +{ + /* + * This is a Laptop display, it doesn't have hotplugging. + * Even if not, the detection bit of the 2501 is unreliable as + * it only works for some display types. + * It is even more unreliable as the PLL must be active for + * allowing reading from the chiop. + */ + return connector_status_connected; +} + +static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + DRM_DEBUG_KMS + ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", + __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, + mode->vtotal); + + /* + * Currently, these are all the modes I have data from. + * More might exist. Unclear how to find the native resolution + * of the panel in here so we could always accept it + * by disabling the scaler. + */ + if ((mode->hdisplay == 800 && mode->vdisplay == 600) || + (mode->hdisplay == 640 && mode->vdisplay == 480) || + (mode->hdisplay == 1024 && mode->vdisplay == 768)) { + return MODE_OK; + } else { + return MODE_ONE_SIZE; /* Is this a reasonable error? */ + } +} + +static void ns2501_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ok; + bool restore = false; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + + DRM_DEBUG_KMS + ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", + __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, + mode->vtotal); + + /* + * Where do I find the native resolution for which scaling is not required??? + * + * First trigger the DVO on as otherwise the chip does not appear on the i2c + * bus. + */ + do { + ok = true; + + if (mode->hdisplay == 800 && mode->vdisplay == 600) { + /* mode 277 */ + ns->reg_8_shadow &= ~NS2501_8_BPAS; + DRM_DEBUG_KMS("%s: switching to 800x600\n", + __FUNCTION__); + + /* + * No, I do not know where this data comes from. + * It is just what the video bios left in the DVO, so + * I'm just copying it here over. + * This also means that I cannot support any other modes + * except the ones supported by the bios. + */ + ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works. 
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19); + ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer + ok &= ns2501_writeb(dvo, 0x1d, 0x02); + + ok &= ns2501_writeb(dvo, 0x34, 0x03); + ok &= ns2501_writeb(dvo, 0x35, 0xff); + + ok &= ns2501_writeb(dvo, 0x80, 0x27); + ok &= ns2501_writeb(dvo, 0x81, 0x03); + ok &= ns2501_writeb(dvo, 0x82, 0x41); + ok &= ns2501_writeb(dvo, 0x83, 0x05); + + ok &= ns2501_writeb(dvo, 0x8d, 0x02); + ok &= ns2501_writeb(dvo, 0x8e, 0x04); + ok &= ns2501_writeb(dvo, 0x8f, 0x00); + + ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ + ok &= ns2501_writeb(dvo, 0x91, 0x07); + ok &= ns2501_writeb(dvo, 0x94, 0x00); + ok &= ns2501_writeb(dvo, 0x95, 0x00); + + ok &= ns2501_writeb(dvo, 0x96, 0x00); + + ok &= ns2501_writeb(dvo, 0x99, 0x00); + ok &= ns2501_writeb(dvo, 0x9a, 0x88); + + ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */ + ok &= ns2501_writeb(dvo, 0x9d, 0x00); + ok &= ns2501_writeb(dvo, 0x9e, 0x25); + ok &= ns2501_writeb(dvo, 0x9f, 0x03); + + ok &= ns2501_writeb(dvo, 0xa4, 0x80); + + ok &= ns2501_writeb(dvo, 0xb6, 0x00); + + ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ + + ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xc1, 0xd7); + + ok &= ns2501_writeb(dvo, 0xc2, 0x00); + ok &= ns2501_writeb(dvo, 0xc3, 0xf8); + + ok &= ns2501_writeb(dvo, 0xc4, 0x03); + ok &= ns2501_writeb(dvo, 0xc5, 0x1a); + + ok &= ns2501_writeb(dvo, 0xc6, 0x00); + ok &= ns2501_writeb(dvo, 0xc7, 0x73); + ok &= ns2501_writeb(dvo, 0xc8, 0x02); + + } else if (mode->hdisplay == 640 && mode->vdisplay == 480) { + /* mode 274 */ + DRM_DEBUG_KMS("%s: switching to 640x480\n", + __FUNCTION__); + /* + * No, I do not know where this data comes from. + * It is just what the video bios left in the DVO, so + * I'm just copying it here over. + * This also means that I cannot support any other modes + * except the ones supported by the bios. + */ + ns->reg_8_shadow &= ~NS2501_8_BPAS; + + ok &= ns2501_writeb(dvo, 0x11, 0xa0); + ok &= ns2501_writeb(dvo, 0x1b, 0x11); + ok &= ns2501_writeb(dvo, 0x1c, 0x54); + ok &= ns2501_writeb(dvo, 0x1d, 0x03); + + ok &= ns2501_writeb(dvo, 0x34, 0x03); + ok &= ns2501_writeb(dvo, 0x35, 0xff); + + ok &= ns2501_writeb(dvo, 0x80, 0xff); + ok &= ns2501_writeb(dvo, 0x81, 0x07); + ok &= ns2501_writeb(dvo, 0x82, 0x3d); + ok &= ns2501_writeb(dvo, 0x83, 0x05); + + ok &= ns2501_writeb(dvo, 0x8d, 0x02); + ok &= ns2501_writeb(dvo, 0x8e, 0x10); + ok &= ns2501_writeb(dvo, 0x8f, 0x00); + + ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */ + ok &= ns2501_writeb(dvo, 0x91, 0x07); + ok &= ns2501_writeb(dvo, 0x94, 0x00); + ok &= ns2501_writeb(dvo, 0x95, 0x00); + + ok &= ns2501_writeb(dvo, 0x96, 0x05); + + ok &= ns2501_writeb(dvo, 0x99, 0x00); + ok &= ns2501_writeb(dvo, 0x9a, 0x88); + + ok &= ns2501_writeb(dvo, 0x9c, 0x24); + ok &= ns2501_writeb(dvo, 0x9d, 0x00); + ok &= ns2501_writeb(dvo, 0x9e, 0x25); + ok &= ns2501_writeb(dvo, 0x9f, 0x03); + + ok &= ns2501_writeb(dvo, 0xa4, 0x84); + + ok &= ns2501_writeb(dvo, 0xb6, 0x09); + + ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ + + ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? 
*/ + ok &= ns2501_writeb(dvo, 0xc1, 0x90); + + ok &= ns2501_writeb(dvo, 0xc2, 0x00); + ok &= ns2501_writeb(dvo, 0xc3, 0x0f); + + ok &= ns2501_writeb(dvo, 0xc4, 0x03); + ok &= ns2501_writeb(dvo, 0xc5, 0x16); + + ok &= ns2501_writeb(dvo, 0xc6, 0x00); + ok &= ns2501_writeb(dvo, 0xc7, 0x02); + ok &= ns2501_writeb(dvo, 0xc8, 0x02); + + } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { + /* mode 280 */ + DRM_DEBUG_KMS("%s: switching to 1024x768\n", + __FUNCTION__); + /* + * This might or might not work, actually. I'm silently + * assuming here that the native panel resolution is + * 1024x768. If not, then this leaves the scaler disabled + * generating a picture that is likely not the expected. + * + * Problem is that I do not know where to take the panel + * dimensions from. + * + * Enable the bypass, scaling not required. + * + * The scaler registers are irrelevant here.... + * + */ + ns->reg_8_shadow |= NS2501_8_BPAS; + ok &= ns2501_writeb(dvo, 0x37, 0x44); + } else { + /* + * Data not known. Bummer! + * Hopefully, the code should not go here + * as mode_OK delivered no other modes. + */ + ns->reg_8_shadow |= NS2501_8_BPAS; + } + ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); + + if (!ok) { + if (restore) + restore_dvo(dvo); + enable_dvo(dvo); + restore = true; + } + } while (!ok); + /* + * Restore the old i915 registers before + * forcing the ns2501 on. + */ + if (restore) + restore_dvo(dvo); +} + +/* set the NS2501 power state */ +static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) +{ + unsigned char ch; + + if (!ns2501_readb(dvo, NS2501_REG8, &ch)) + return false; + + if (ch & NS2501_8_PD) + return true; + else + return false; +} + +/* set the NS2501 power state */ +static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) +{ + bool ok; + bool restore = false; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + unsigned char ch; + + DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n", + __FUNCTION__, enable); + + ch = ns->reg_8_shadow; + + if (enable) + ch |= NS2501_8_PD; + else + ch &= ~NS2501_8_PD; + + if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) { + ns->reg_8_set = 1; + ns->reg_8_shadow = ch; + + do { + ok = true; + ok &= ns2501_writeb(dvo, NS2501_REG8, ch); + ok &= + ns2501_writeb(dvo, 0x34, + enable ? 0x03 : 0x00); + ok &= + ns2501_writeb(dvo, 0x35, + enable ? 
0xff : 0x00); + if (!ok) { + if (restore) + restore_dvo(dvo); + enable_dvo(dvo); + restore = true; + } + } while (!ok); + + if (restore) + restore_dvo(dvo); + } +} + +static void ns2501_dump_regs(struct intel_dvo_device *dvo) +{ + uint8_t val; + + ns2501_readb(dvo, NS2501_FREQ_LO, &val); + DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_FREQ_HI, &val); + DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REG8, &val); + DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REG9, &val); + DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REGC, &val); + DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val); +} + +static void ns2501_destroy(struct intel_dvo_device *dvo) +{ + struct ns2501_priv *ns = dvo->dev_priv; + + if (ns) { + kfree(ns); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops ns2501_ops = { + .init = ns2501_init, + .detect = ns2501_detect, + .mode_valid = ns2501_mode_valid, + .mode_set = ns2501_mode_set, + .dpms = ns2501_dpms, + .get_hw_state = ns2501_get_hw_state, + .dump_regs = ns2501_dump_regs, + .destroy = ns2501_destroy, +}; diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c new file mode 100644 index 0000000..4debd32 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -0,0 +1,279 @@ +/************************************************************************** + +Copyright © 2006 Dave Airlie + +All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sub license, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +**************************************************************************/ + +#include "dvo.h" + +#define SIL164_VID 0x0001 +#define SIL164_DID 0x0006 + +#define SIL164_VID_LO 0x00 +#define SIL164_VID_HI 0x01 +#define SIL164_DID_LO 0x02 +#define SIL164_DID_HI 0x03 +#define SIL164_REV 0x04 +#define SIL164_RSVD 0x05 +#define SIL164_FREQ_LO 0x06 +#define SIL164_FREQ_HI 0x07 + +#define SIL164_REG8 0x08 +#define SIL164_8_VEN (1<<5) +#define SIL164_8_HEN (1<<4) +#define SIL164_8_DSEL (1<<3) +#define SIL164_8_BSEL (1<<2) +#define SIL164_8_EDGE (1<<1) +#define SIL164_8_PD (1<<0) + +#define SIL164_REG9 0x09 +#define SIL164_9_VLOW (1<<7) +#define SIL164_9_MSEL_MASK (0x7<<4) +#define SIL164_9_TSEL (1<<3) +#define SIL164_9_RSEN (1<<2) +#define SIL164_9_HTPLG (1<<1) +#define SIL164_9_MDI (1<<0) + +#define SIL164_REGC 0x0c + +struct sil164_priv { + //I2CDevRec d; + bool quiet; +}; + +#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) + +static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +{ + struct sil164_priv *sil = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + }; + + if (!sil->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +{ + struct sil164_priv *sil = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + uint8_t out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!sil->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* Silicon Image 164 driver for chip on i2c bus */ +static bool sil164_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the SIL164 chip on the specified i2c bus */ + struct sil164_priv *sil; + unsigned char ch; + + sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); + if (sil == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = sil; + sil->quiet = true; + + if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) + goto out; + + if (ch != (SIL164_VID & 0xff)) { + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + + if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) + goto out; + + if (ch != (SIL164_DID & 0xff)) { + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + sil->quiet = false; + + DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n"); + return true; + +out: + kfree(sil); + return false; +} + +static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) +{ + uint8_t reg9; + + sil164_readb(dvo, SIL164_REG9, ®9); + + if (reg9 & SIL164_9_HTPLG) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + 
return MODE_OK; +} + +static void sil164_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* As long as the basics are set up, since we don't have clock + * dependencies in the mode setup, we can just leave the + * registers alone and everything will work fine. + */ + /* recommended programming sequence from doc */ + /*sil164_writeb(sil, 0x08, 0x30); + sil164_writeb(sil, 0x09, 0x00); + sil164_writeb(sil, 0x0a, 0x90); + sil164_writeb(sil, 0x0c, 0x89); + sil164_writeb(sil, 0x08, 0x31);*/ + /* don't do much */ + return; +} + +/* set the SIL164 power state */ +static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) +{ + int ret; + unsigned char ch; + + ret = sil164_readb(dvo, SIL164_REG8, &ch); + if (ret == false) + return; + + if (enable) + ch |= SIL164_8_PD; + else + ch &= ~SIL164_8_PD; + + sil164_writeb(dvo, SIL164_REG8, ch); + return; +} + +static bool sil164_get_hw_state(struct intel_dvo_device *dvo) +{ + int ret; + unsigned char ch; + + ret = sil164_readb(dvo, SIL164_REG8, &ch); + if (ret == false) + return false; + + if (ch & SIL164_8_PD) + return true; + else + return false; +} + +static void sil164_dump_regs(struct intel_dvo_device *dvo) +{ + uint8_t val; + + sil164_readb(dvo, SIL164_FREQ_LO, &val); + DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); + sil164_readb(dvo, SIL164_FREQ_HI, &val); + DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REG8, &val); + DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REG9, &val); + DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REGC, &val); + DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val); +} + +static void sil164_destroy(struct intel_dvo_device *dvo) +{ + struct sil164_priv *sil = dvo->dev_priv; + + if (sil) { + kfree(sil); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops sil164_ops = { + .init = sil164_init, + .detect = sil164_detect, + .mode_valid = sil164_mode_valid, + .mode_set = sil164_mode_set, + .dpms = sil164_dpms, + .get_hw_state = sil164_get_hw_state, + .dump_regs = sil164_dump_regs, + .destroy = sil164_destroy, +}; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c new file mode 100644 index 0000000..e17f1b0 --- /dev/null +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -0,0 +1,318 @@ +/* + * Copyright © 2007 Dave Mueller + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dave Mueller + * + */ + +#include "dvo.h" + +/* register definitions according to the TFP410 data sheet */ +#define TFP410_VID 0x014C +#define TFP410_DID 0x0410 + +#define TFP410_VID_LO 0x00 +#define TFP410_VID_HI 0x01 +#define TFP410_DID_LO 0x02 +#define TFP410_DID_HI 0x03 +#define TFP410_REV 0x04 + +#define TFP410_CTL_1 0x08 +#define TFP410_CTL_1_TDIS (1<<6) +#define TFP410_CTL_1_VEN (1<<5) +#define TFP410_CTL_1_HEN (1<<4) +#define TFP410_CTL_1_DSEL (1<<3) +#define TFP410_CTL_1_BSEL (1<<2) +#define TFP410_CTL_1_EDGE (1<<1) +#define TFP410_CTL_1_PD (1<<0) + +#define TFP410_CTL_2 0x09 +#define TFP410_CTL_2_VLOW (1<<7) +#define TFP410_CTL_2_MSEL_MASK (0x7<<4) +#define TFP410_CTL_2_MSEL (1<<4) +#define TFP410_CTL_2_TSEL (1<<3) +#define TFP410_CTL_2_RSEN (1<<2) +#define TFP410_CTL_2_HTPLG (1<<1) +#define TFP410_CTL_2_MDI (1<<0) + +#define TFP410_CTL_3 0x0A +#define TFP410_CTL_3_DK_MASK (0x7<<5) +#define TFP410_CTL_3_DK (1<<5) +#define TFP410_CTL_3_DKEN (1<<4) +#define TFP410_CTL_3_CTL_MASK (0x7<<1) +#define TFP410_CTL_3_CTL (1<<1) + +#define TFP410_USERCFG 0x0B + +#define TFP410_DE_DLY 0x32 + +#define TFP410_DE_CTL 0x33 +#define TFP410_DE_CTL_DEGEN (1<<6) +#define TFP410_DE_CTL_VSPOL (1<<5) +#define TFP410_DE_CTL_HSPOL (1<<4) +#define TFP410_DE_CTL_DEDLY8 (1<<0) + +#define TFP410_DE_TOP 0x34 + +#define TFP410_DE_CNT_LO 0x36 +#define TFP410_DE_CNT_HI 0x37 + +#define TFP410_DE_LIN_LO 0x38 +#define TFP410_DE_LIN_HI 0x39 + +#define TFP410_H_RES_LO 0x3A +#define TFP410_H_RES_HI 0x3B + +#define TFP410_V_RES_LO 0x3C +#define TFP410_V_RES_HI 0x3D + +struct tfp410_priv { + bool quiet; +}; + +static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +{ + struct tfp410_priv *tfp = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + }; + + if (!tfp->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +{ + struct tfp410_priv *tfp = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + uint8_t out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!tfp->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +static int tfp410_getid(struct intel_dvo_device *dvo, int addr) +{ + uint8_t ch1, ch2; + + if (tfp410_readb(dvo, addr+0, &ch1) && + tfp410_readb(dvo, addr+1, &ch2)) + return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); + + return -1; +} + +/* Ti TFP410 driver for chip on i2c bus */ +static bool tfp410_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the tfp410 chip on the specified i2c bus */ + struct tfp410_priv *tfp; + int id; + + tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); + if (tfp == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = tfp; + tfp->quiet = true; + + if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != 
TFP410_VID) { + DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " + "Slave %d.\n", + id, adapter->name, dvo->slave_addr); + goto out; + } + + if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { + DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " + "Slave %d.\n", + id, adapter->name, dvo->slave_addr); + goto out; + } + tfp->quiet = false; + return true; +out: + kfree(tfp); + return false; +} + +static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) +{ + enum drm_connector_status ret = connector_status_disconnected; + uint8_t ctl2; + + if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { + if (ctl2 & TFP410_CTL_2_RSEN) + ret = connector_status_connected; + else + ret = connector_status_disconnected; + } + + return ret; +} + +static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static void tfp410_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* As long as the basics are set up, since we don't have clock dependencies + * in the mode setup, we can just leave the registers alone and everything + * will work fine. + */ + /* don't do much */ + return; +} + +/* set the tfp410 power state */ +static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) +{ + uint8_t ctl1; + + if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) + return; + + if (enable) + ctl1 |= TFP410_CTL_1_PD; + else + ctl1 &= ~TFP410_CTL_1_PD; + + tfp410_writeb(dvo, TFP410_CTL_1, ctl1); +} + +static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) +{ + uint8_t ctl1; + + if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) + return false; + + if (ctl1 & TFP410_CTL_1_PD) + return true; + else + return false; +} + +static void tfp410_dump_regs(struct intel_dvo_device *dvo) +{ + uint8_t val, val2; + + tfp410_readb(dvo, TFP410_REV, &val); + DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_1, &val); + DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_2, &val); + DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_3, &val); + DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_USERCFG, &val); + DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_DLY, &val); + DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_CTL, &val); + DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_TOP, &val); + DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); + tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); + DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); + tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); + DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_H_RES_LO, &val); + tfp410_readb(dvo, TFP410_H_RES_HI, &val2); + DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_V_RES_LO, &val); + tfp410_readb(dvo, TFP410_V_RES_HI, &val2); + DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); +} + +static void tfp410_destroy(struct intel_dvo_device *dvo) +{ + struct tfp410_priv *tfp = dvo->dev_priv; + + if (tfp) { + kfree(tfp); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops tfp410_ops = { + .init = tfp410_init, + .detect = tfp410_detect, + .mode_valid = tfp410_mode_valid, + .mode_set = tfp410_mode_set, + .dpms = tfp410_dpms, + .get_hw_state = tfp410_get_hw_state, + 
.dump_regs = tfp410_dump_regs, + .destroy = tfp410_destroy, +}; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c new file mode 100644 index 0000000..e913d32 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -0,0 +1,2165 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Keith Packard + * + */ + +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <generated/utsrelease.h> +#include <drm/drmP.h> +#include "intel_drv.h" +#include "intel_ringbuffer.h" +#include <drm/i915_drm.h> +#include "i915_drv.h" + +#define DRM_I915_RING_DEBUG 1 + + +#if defined(CONFIG_DEBUG_FS) + +enum { + ACTIVE_LIST, + INACTIVE_LIST, + PINNED_LIST, +}; + +static const char *yesno(int v) +{ + return v ? "yes" : "no"; +} + +static int i915_capabilities(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + const struct intel_device_info *info = INTEL_INFO(dev); + + seq_printf(m, "gen: %d\n", info->gen); + seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); +#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) +#define DEV_INFO_SEP ; + DEV_INFO_FLAGS; +#undef DEV_INFO_FLAG +#undef DEV_INFO_SEP + + return 0; +} + +static const char *get_pin_flag(struct drm_i915_gem_object *obj) +{ + if (obj->user_pin_count > 0) + return "P"; + else if (obj->pin_count > 0) + return "p"; + else + return " "; +} + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) +{ + switch (obj->tiling_mode) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + +static const char *cache_level_str(int type) +{ + switch (type) { + case I915_CACHE_NONE: return " uncached"; + case I915_CACHE_LLC: return " snooped (LLC)"; + case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; + default: return ""; + } +} + +static void +describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) +{ + seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + &obj->base, + get_pin_flag(obj), + get_tiling_flag(obj), + obj->base.size / 1024, + obj->base.read_domains, + obj->base.write_domain, + obj->last_read_seqno, + obj->last_write_seqno, + obj->last_fenced_seqno, + cache_level_str(obj->cache_level), + obj->dirty ? " dirty" : "", + obj->madv == I915_MADV_DONTNEED ?
" purgeable" : ""); + if (obj->base.name) + seq_printf(m, " (name: %d)", obj->base.name); + if (obj->pin_count) + seq_printf(m, " (pinned x %d)", obj->pin_count); + if (obj->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", obj->fence_reg); + if (obj->gtt_space != NULL) + seq_printf(m, " (gtt offset: %08x, size: %08x)", + obj->gtt_offset, (unsigned int)obj->gtt_space->size); + if (obj->stolen) + seq_printf(m, " (stolen: %08lx)", obj->stolen->start); + if (obj->pin_mappable || obj->fault_mappable) { + char s[3], *t = s; + if (obj->pin_mappable) + *t++ = 'p'; + if (obj->fault_mappable) + *t++ = 'f'; + *t = '\0'; + seq_printf(m, " (%s mappable)", s); + } + if (obj->ring != NULL) + seq_printf(m, " (%s)", obj->ring->name); +} + +static int i915_gem_object_list_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + uintptr_t list = (uintptr_t) node->info_ent->data; + struct list_head *head; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + size_t total_obj_size, total_gtt_size; + int count, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + switch (list) { + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + head = &dev_priv->mm.active_list; + break; + case INACTIVE_LIST: + seq_printf(m, "Inactive:\n"); + head = &dev_priv->mm.inactive_list; + break; + default: + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + total_obj_size = total_gtt_size = count = 0; + list_for_each_entry(obj, head, mm_list) { + seq_printf(m, " "); + describe_obj(m, obj); + seq_printf(m, "\n"); + total_obj_size += obj->base.size; + total_gtt_size += obj->gtt_space->size; + count++; + } + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, total_gtt_size); + return 0; +} + +#define count_objects(list, member) do { \ + list_for_each_entry(obj, list, member) { \ + size += obj->gtt_space->size; \ + ++count; \ + if (obj->map_and_fenceable) { \ + mappable_size += obj->gtt_space->size; \ + ++mappable_count; \ + } \ + } \ +} while (0) + +static int i915_gem_object_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 count, mappable_count, purgeable_count; + size_t size, mappable_size, purgeable_size; + struct drm_i915_gem_object *obj; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "%u objects, %zu bytes\n", + dev_priv->mm.object_count, + dev_priv->mm.object_memory); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.bound_list, gtt_list); + seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.active_list, mm_list); + seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.inactive_list, mm_list); + seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = purgeable_size = purgeable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { + size += 
obj->base.size, ++count; + if (obj->madv == I915_MADV_DONTNEED) + purgeable_size += obj->base.size, ++purgeable_count; + } + seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); + + size = count = mappable_size = mappable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + if (obj->fault_mappable) { + size += obj->gtt_space->size; + ++count; + } + if (obj->pin_mappable) { + mappable_size += obj->gtt_space->size; + ++mappable_count; + } + if (obj->madv == I915_MADV_DONTNEED) { + purgeable_size += obj->base.size; + ++purgeable_count; + } + } + seq_printf(m, "%u purgeable objects, %zu bytes\n", + purgeable_count, purgeable_size); + seq_printf(m, "%u pinned mappable objects, %zu bytes\n", + mappable_count, mappable_size); + seq_printf(m, "%u fault mappable objects, %zu bytes\n", + count, size); + + seq_printf(m, "%zu [%lu] gtt total\n", + dev_priv->gtt.total, + dev_priv->gtt.mappable_end - dev_priv->gtt.start); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_gem_gtt_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + uintptr_t list = (uintptr_t) node->info_ent->data; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + size_t total_obj_size, total_gtt_size; + int count, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + total_obj_size = total_gtt_size = count = 0; + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + if (list == PINNED_LIST && obj->pin_count == 0) + continue; + + seq_printf(m, " "); + describe_obj(m, obj); + seq_printf(m, "\n"); + total_obj_size += obj->base.size; + total_gtt_size += obj->gtt_space->size; + count++; + } + + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, total_gtt_size); + + return 0; +} + +static int i915_gem_pageflip_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + unsigned long flags; + struct intel_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + const char pipe = pipe_name(crtc->pipe); + const char plane = plane_name(crtc->plane); + struct intel_unpin_work *work; + + spin_lock_irqsave(&dev->event_lock, flags); + work = crtc->unpin_work; + if (work == NULL) { + seq_printf(m, "No flip due on pipe %c (plane %c)\n", + pipe, plane); + } else { + if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { + seq_printf(m, "Flip queued on pipe %c (plane %c)\n", + pipe, plane); + } else { + seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", + pipe, plane); + } + if (work->enable_stall_check) + seq_printf(m, "Stall check enabled, "); + else + seq_printf(m, "Stall check waiting for page flip ioctl, "); + seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); + + if (work->old_fb_obj) { + struct drm_i915_gem_object *obj = work->old_fb_obj; + if (obj) + seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + } + if (work->pending_flip_obj) { + struct drm_i915_gem_object *obj = work->pending_flip_obj; + if (obj) + seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + } + } + spin_unlock_irqrestore(&dev->event_lock, flags); + } + + return 0; +} + +static int i915_gem_request_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node 
= (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct drm_i915_gem_request *gem_request; + int ret, count, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + count = 0; + for_each_ring(ring, dev_priv, i) { + if (list_empty(&ring->request_list)) + continue; + + seq_printf(m, "%s requests:\n", ring->name); + list_for_each_entry(gem_request, + &ring->request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + mutex_unlock(&dev->struct_mutex); + + if (count == 0) + seq_printf(m, "No requests\n"); + + return 0; +} + +static void i915_ring_seqno_info(struct seq_file *m, + struct intel_ring_buffer *ring) +{ + if (ring->get_seqno) { + seq_printf(m, "Current sequence (%s): %u\n", + ring->name, ring->get_seqno(ring, false)); + } +} + +static int i915_gem_seqno_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + for_each_ring(ring, dev_priv, i) + i915_ring_seqno_info(m, ring); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + + +static int i915_interrupt_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int ret, i, pipe; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (IS_VALLEYVIEW(dev)) { + seq_printf(m, "Display IER:\t%08x\n", + I915_READ(VLV_IER)); + seq_printf(m, "Display IIR:\t%08x\n", + I915_READ(VLV_IIR)); + seq_printf(m, "Display IIR_RW:\t%08x\n", + I915_READ(VLV_IIR_RW)); + seq_printf(m, "Display IMR:\t%08x\n", + I915_READ(VLV_IMR)); + for_each_pipe(pipe) + seq_printf(m, "Pipe %c stat:\t%08x\n", + pipe_name(pipe), + I915_READ(PIPESTAT(pipe))); + + seq_printf(m, "Master IER:\t%08x\n", + I915_READ(VLV_MASTER_IER)); + + seq_printf(m, "Render IER:\t%08x\n", + I915_READ(GTIER)); + seq_printf(m, "Render IIR:\t%08x\n", + I915_READ(GTIIR)); + seq_printf(m, "Render IMR:\t%08x\n", + I915_READ(GTIMR)); + + seq_printf(m, "PM IER:\t\t%08x\n", + I915_READ(GEN6_PMIER)); + seq_printf(m, "PM IIR:\t\t%08x\n", + I915_READ(GEN6_PMIIR)); + seq_printf(m, "PM IMR:\t\t%08x\n", + I915_READ(GEN6_PMIMR)); + + seq_printf(m, "Port hotplug:\t%08x\n", + I915_READ(PORT_HOTPLUG_EN)); + seq_printf(m, "DPFLIPSTAT:\t%08x\n", + I915_READ(VLV_DPFLIPSTAT)); + seq_printf(m, "DPINVGTT:\t%08x\n", + I915_READ(DPINVGTT)); + + } else if (!HAS_PCH_SPLIT(dev)) { + seq_printf(m, "Interrupt enable: %08x\n", + I915_READ(IER)); + seq_printf(m, "Interrupt identity: %08x\n", + I915_READ(IIR)); + seq_printf(m, "Interrupt mask: %08x\n", + I915_READ(IMR)); + for_each_pipe(pipe) + seq_printf(m, "Pipe %c stat: %08x\n", + pipe_name(pipe), + I915_READ(PIPESTAT(pipe))); + } else { + seq_printf(m, "North Display Interrupt enable: %08x\n", + I915_READ(DEIER)); + seq_printf(m, "North Display Interrupt identity: %08x\n", + I915_READ(DEIIR)); + seq_printf(m, "North Display Interrupt mask: %08x\n", + I915_READ(DEIMR)); + seq_printf(m, "South Display Interrupt enable: %08x\n", + I915_READ(SDEIER)); + 
seq_printf(m, "South Display Interrupt identity: %08x\n", + I915_READ(SDEIIR)); + seq_printf(m, "South Display Interrupt mask: %08x\n", + I915_READ(SDEIMR)); + seq_printf(m, "Graphics Interrupt enable: %08x\n", + I915_READ(GTIER)); + seq_printf(m, "Graphics Interrupt identity: %08x\n", + I915_READ(GTIIR)); + seq_printf(m, "Graphics Interrupt mask: %08x\n", + I915_READ(GTIMR)); + } + seq_printf(m, "Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); + for_each_ring(ring, dev_priv, i) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { + seq_printf(m, + "Graphics Interrupt mask (%s): %08x\n", + ring->name, I915_READ_IMR(ring)); + } + i915_ring_seqno_info(m, ring); + } + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_gem_fence_regs_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int i, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); + seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; + + seq_printf(m, "Fence %d, pin count = %d, object = ", + i, dev_priv->fence_regs[i].pin_count); + if (obj == NULL) + seq_printf(m, "unused"); + else + describe_obj(m, obj); + seq_printf(m, "\n"); + } + + mutex_unlock(&dev->struct_mutex); + return 0; +} + +static int i915_hws_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + const u32 *hws; + int i; + + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; + hws = ring->status_page.page_addr; + if (hws == NULL) + return 0; + + for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { + seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", + i * 4, + hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); + } + return 0; +} + +static const char *ring_str(int ring) +{ + switch (ring) { + case RCS: return "render"; + case VCS: return "bsd"; + case BCS: return "blt"; + default: return ""; + } +} + +static const char *pin_flag(int pinned) +{ + if (pinned > 0) + return " P"; + else if (pinned < 0) + return " p"; + else + return ""; +} + +static const char *tiling_flag(int tiling) +{ + switch (tiling) { + default: + case I915_TILING_NONE: return ""; + case I915_TILING_X: return " X"; + case I915_TILING_Y: return " Y"; + } +} + +static const char *dirty_flag(int dirty) +{ + return dirty ? " dirty" : ""; +} + +static const char *purgeable_flag(int purgeable) +{ + return purgeable ? " purgeable" : ""; +} + +static void print_error_buffers(struct seq_file *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + seq_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->rseqno, err->wseqno, + pin_flag(err->pinned), + tiling_flag(err->tiling), + dirty_flag(err->dirty), + purgeable_flag(err->purgeable), + err->ring != -1 ? 
" " : "", + ring_str(err->ring), + cache_level_str(err->cache_level)); + + if (err->name) + seq_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", err->fence_reg); + + seq_printf(m, "\n"); + err++; + } +} + +static void i915_ring_error_state(struct seq_file *m, + struct drm_device *dev, + struct drm_i915_error_state *error, + unsigned ring) +{ + BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ + seq_printf(m, "%s command stream:\n", ring_str(ring)); + seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); + seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); + seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); + seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); + seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); + seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); + if (ring == RCS && INTEL_INFO(dev)->gen >= 4) + seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); + + if (INTEL_INFO(dev)->gen >= 4) + seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); + seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); + seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); + seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][0], + error->semaphore_seqno[ring][0]); + seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][1], + error->semaphore_seqno[ring][1]); + } + seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); + seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); + seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); + seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); +} + +struct i915_error_state_file_priv { + struct drm_device *dev; + struct drm_i915_error_state *error; +}; + +static int i915_error_state(struct seq_file *m, void *unused) +{ + struct i915_error_state_file_priv *error_priv = m->private; + struct drm_device *dev = error_priv->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error = error_priv->error; + struct intel_ring_buffer *ring; + int i, j, page, offset, elt; + + if (!error) { + seq_printf(m, "no error state collected\n"); + return 0; + } + + seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); + seq_printf(m, "Kernel: " UTS_RELEASE "\n"); + seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, "IER: 0x%08x\n", error->ier); + seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); + seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); + seq_printf(m, "CCID: 0x%08x\n", error->ccid); + + for (i = 0; i < dev_priv->num_fence_regs; i++) + seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + + for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) + seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); + + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, "ERROR: 0x%08x\n", error->error); + seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); + } + + if (INTEL_INFO(dev)->gen == 7) + seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + + for_each_ring(ring, dev_priv, i) + i915_ring_error_state(m, dev, error, i); + + if (error->active_bo) + 
print_error_buffers(m, "Active", + error->active_bo, + error->active_bo_count); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo, + error->pinned_bo_count); + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + struct drm_i915_error_object *obj; + + if ((obj = error->ring[i].batchbuffer)) { + seq_printf(m, "%s --- gtt_offset = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); + offset += 4; + } + } + } + + if (error->ring[i].num_requests) { + seq_printf(m, "%s --- %d requests\n", + dev_priv->ring[i].name, + error->ring[i].num_requests); + for (j = 0; j < error->ring[i].num_requests; j++) { + seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", + error->ring[i].requests[j].seqno, + error->ring[i].requests[j].jiffies, + error->ring[i].requests[j].tail); + } + } + + if ((obj = error->ring[i].ringbuffer)) { + seq_printf(m, "%s --- ringbuffer = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + seq_printf(m, "%08x : %08x\n", + offset, + obj->pages[page][elt]); + offset += 4; + } + } + } + + obj = error->ring[i].ctx; + if (obj) { + seq_printf(m, "%s --- HW Context = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + seq_printf(m, "[%04x] %08x %08x %08x %08x\n", + offset, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + offset += 16; + } + } + } + + if (error->overlay) + intel_overlay_print_error_state(m, error->overlay); + + if (error->display) + intel_display_print_error_state(m, dev, error->display); + + return 0; +} + +static ssize_t +i915_error_state_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct i915_error_state_file_priv *error_priv = m->private; + struct drm_device *dev = error_priv->dev; + int ret; + + DRM_DEBUG_DRIVER("Resetting error state\n"); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + i915_destroy_error_state(dev); + mutex_unlock(&dev->struct_mutex); + + return cnt; +} + +static int i915_error_state_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_error_state_file_priv *error_priv; + unsigned long flags; + + error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); + if (!error_priv) + return -ENOMEM; + + error_priv->dev = dev; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error_priv->error = dev_priv->gpu_error.first_error; + if (error_priv->error) + kref_get(&error_priv->error->ref); + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + return single_open(file, i915_error_state, error_priv); +} + +static int i915_error_state_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct i915_error_state_file_priv *error_priv = m->private; + + if (error_priv->error) + kref_put(&error_priv->error->ref, i915_error_state_free); + kfree(error_priv); + + return single_release(inode, file); +} + +static const struct file_operations i915_error_state_fops = { + .owner = THIS_MODULE, + .open = i915_error_state_open, + .read = seq_read, + .write = 
i915_error_state_write, + .llseek = default_llseek, + .release = i915_error_state_release, +}; + +static int +i915_next_seqno_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + *val = dev_priv->next_seqno; + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int +i915_next_seqno_set(void *data, u64 val) +{ + struct drm_device *dev = data; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + ret = i915_gem_set_seqno(dev, val); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, + i915_next_seqno_get, i915_next_seqno_set, + "0x%llx\n"); + +static int i915_rstdby_delays(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u16 crstanddelay; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + crstanddelay = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); + + return 0; +} + +static int i915_cur_delayinfo(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (IS_GEN5(dev)) { + u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvstat = I915_READ16(MEMSTAT_ILK); + + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + u32 rpstat, cagf; + u32 rpupei, rpcurup, rpprevup; + u32 rpdownei, rpcurdown, rpprevdown; + int max_freq; + + /* RPSTAT1 is in the GT power well */ + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + gen6_gt_force_wake_get(dev_priv); + + rpstat = I915_READ(GEN6_RPSTAT1); + rpupei = I915_READ(GEN6_RP_CUR_UP_EI); + rpcurup = I915_READ(GEN6_RP_CUR_UP); + rpprevup = I915_READ(GEN6_RP_PREV_UP); + rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); + rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); + rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); + if (IS_HASWELL(dev)) + cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; + else + cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + cagf *= GT_FREQUENCY_MULTIPLIER; + + gen6_gt_force_wake_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); + seq_printf(m, "Render p-state ratio: %d\n", + (gt_perf_status & 0xff00) >> 8); + seq_printf(m, "Render p-state VID: %d\n", + gt_perf_status & 0xff); + seq_printf(m, "Render p-state limit: %d\n", + rp_state_limits & 0xff); + seq_printf(m, "CAGF: %dMHz\n", cagf); + seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & + GEN6_CURICONT_MASK); + seq_printf(m, "RP CUR UP: %dus\n", rpcurup & + GEN6_CURBSYTAVG_MASK); + 
seq_printf(m, "RP PREV UP: %dus\n", rpprevup & + GEN6_CURBSYTAVG_MASK); + seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & + GEN6_CURIAVG_MASK); + seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & + GEN6_CURBSYTAVG_MASK); + seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & + GEN6_CURBSYTAVG_MASK); + + max_freq = (rp_state_cap & 0xff0000) >> 16; + seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + max_freq * GT_FREQUENCY_MULTIPLIER); + + max_freq = (rp_state_cap & 0xff00) >> 8; + seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", + max_freq * GT_FREQUENCY_MULTIPLIER); + + max_freq = rp_state_cap & 0xff; + seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + max_freq * GT_FREQUENCY_MULTIPLIER); + + seq_printf(m, "Max overclocked frequency: %dMHz\n", + dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); + } else { + seq_printf(m, "no P-state info available\n"); + } + + return 0; +} + +static int i915_delayfreq_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 delayfreq; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + for (i = 0; i < 16; i++) { + delayfreq = I915_READ(PXVFREQ_BASE + i * 4); + seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, + (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static inline int MAP_TO_MV(int map) +{ + return 1250 - (map * 25); +} + +static int i915_inttoext_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 inttoext; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + for (i = 1; i <= 32; i++) { + inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); + seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int ironlake_drpc_info(struct seq_file *m) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 rgvmodectl, rstdbyctl; + u16 crstandvid; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + rgvmodectl = I915_READ(MEMMODECTL); + rstdbyctl = I915_READ(RSTDBYCTL); + crstandvid = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? + "yes" : "no"); + seq_printf(m, "Boost freq: %d\n", + (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> + MEMMODE_BOOST_FREQ_SHIFT); + seq_printf(m, "HW control enabled: %s\n", + rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); + seq_printf(m, "SW control enabled: %s\n", + rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); + seq_printf(m, "Gated voltage change: %s\n", + rgvmodectl & MEMMODE_RCLK_GATE ? 
"yes" : "no"); + seq_printf(m, "Starting frequency: P%d\n", + (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); + seq_printf(m, "Max P-state: P%d\n", + (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); + seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); + seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); + seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); + seq_printf(m, "Render standby enabled: %s\n", + (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); + seq_printf(m, "Current RS state: "); + switch (rstdbyctl & RSX_STATUS_MASK) { + case RSX_STATUS_ON: + seq_printf(m, "on\n"); + break; + case RSX_STATUS_RC1: + seq_printf(m, "RC1\n"); + break; + case RSX_STATUS_RC1E: + seq_printf(m, "RC1E\n"); + break; + case RSX_STATUS_RS1: + seq_printf(m, "RS1\n"); + break; + case RSX_STATUS_RS2: + seq_printf(m, "RS2 (RC6)\n"); + break; + case RSX_STATUS_RS3: + seq_printf(m, "RC3 (RC6+)\n"); + break; + default: + seq_printf(m, "unknown\n"); + break; + } + + return 0; +} + +static int gen6_drpc_info(struct seq_file *m) +{ + + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; + unsigned forcewake_count; + int count=0, ret; + + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + spin_lock_irq(&dev_priv->gt_lock); + forcewake_count = dev_priv->forcewake_count; + spin_unlock_irq(&dev_priv->gt_lock); + + if (forcewake_count) { + seq_printf(m, "RC information inaccurate because somebody " + "holds a forcewake reference \n"); + } else { + /* NB: we cannot use forcewake, else we read the wrong values */ + while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) + udelay(10); + seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); + } + + gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); + trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); + + rpmodectl1 = I915_READ(GEN6_RP_CONTROL); + rcctl1 = I915_READ(GEN6_RC_CONTROL); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&dev_priv->rps.hw_lock); + sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); + mutex_unlock(&dev_priv->rps.hw_lock); + + seq_printf(m, "Video Turbo Mode: %s\n", + yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); + seq_printf(m, "HW control enabled: %s\n", + yesno(rpmodectl1 & GEN6_RP_ENABLE)); + seq_printf(m, "SW control enabled: %s\n", + yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == + GEN6_RP_MEDIA_SW_MODE)); + seq_printf(m, "RC1e Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); + seq_printf(m, "RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); + seq_printf(m, "Deep RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); + seq_printf(m, "Deepest RC6 Enabled: %s\n", + yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); + seq_printf(m, "Current RC state: "); + switch (gt_core_status & GEN6_RCn_MASK) { + case GEN6_RC0: + if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) + seq_printf(m, "Core Power Down\n"); + else + seq_printf(m, "on\n"); + break; + case GEN6_RC3: + seq_printf(m, "RC3\n"); + break; + case GEN6_RC6: + seq_printf(m, "RC6\n"); + break; + case GEN6_RC7: + seq_printf(m, "RC7\n"); + break; + default: + seq_printf(m, "Unknown\n"); + break; + } + + seq_printf(m, "Core Power Down: %s\n", + yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + + /* Not exactly sure what this is */ + seq_printf(m, "RC6 \"Locked to RPn\" 
residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6_LOCKED)); + seq_printf(m, "RC6 residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6)); + seq_printf(m, "RC6+ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6p)); + seq_printf(m, "RC6++ residency since boot: %u\n", + I915_READ(GEN6_GT_GFX_RC6pp)); + + seq_printf(m, "RC6 voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); + seq_printf(m, "RC6+ voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); + seq_printf(m, "RC6++ voltage: %dmV\n", + GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); + return 0; +} + +static int i915_drpc_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + + if (IS_GEN6(dev) || IS_GEN7(dev)) + return gen6_drpc_info(m); + else + return ironlake_drpc_info(m); +} + +static int i915_fbc_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!I915_HAS_FBC(dev)) { + seq_printf(m, "FBC unsupported on this chipset\n"); + return 0; + } + + if (intel_fbc_enabled(dev)) { + seq_printf(m, "FBC enabled\n"); + } else { + seq_printf(m, "FBC disabled: "); + switch (dev_priv->no_fbc_reason) { + case FBC_NO_OUTPUT: + seq_printf(m, "no outputs"); + break; + case FBC_STOLEN_TOO_SMALL: + seq_printf(m, "not enough stolen memory"); + break; + case FBC_UNSUPPORTED_MODE: + seq_printf(m, "mode not supported"); + break; + case FBC_MODE_TOO_LARGE: + seq_printf(m, "mode too large"); + break; + case FBC_BAD_PLANE: + seq_printf(m, "FBC unsupported on plane"); + break; + case FBC_NOT_TILED: + seq_printf(m, "scanout buffer not tiled"); + break; + case FBC_MULTIPLE_PIPES: + seq_printf(m, "multiple pipes are enabled"); + break; + case FBC_MODULE_PARAM: + seq_printf(m, "disabled per module param (default off)"); + break; + default: + seq_printf(m, "unknown reason"); + } + seq_printf(m, "\n"); + } + return 0; +} + +static int i915_sr_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + bool sr_enabled = false; + + if (HAS_PCH_SPLIT(dev)) + sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; + else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) + sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; + else if (IS_I915GM(dev)) + sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; + else if (IS_PINEVIEW(dev)) + sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + + seq_printf(m, "self-refresh: %s\n", + sr_enabled ? 
"enabled" : "disabled"); + + return 0; +} + +static int i915_emon_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long temp, chipset, gfx; + int ret; + + if (!IS_GEN5(dev)) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + temp = i915_mch_val(dev_priv); + chipset = i915_chipset_val(dev_priv); + gfx = i915_gfx_val(dev_priv); + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "GMCH temp: %ld\n", temp); + seq_printf(m, "Chipset power: %ld\n", chipset); + seq_printf(m, "GFX power: %ld\n", gfx); + seq_printf(m, "Total power: %ld\n", chipset + gfx); + + return 0; +} + +static int i915_ring_freq_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + int gpu_freq, ia_freq; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) { + seq_printf(m, "unsupported on this chipset\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + + for (gpu_freq = dev_priv->rps.min_delay; + gpu_freq <= dev_priv->rps.max_delay; + gpu_freq++) { + ia_freq = gpu_freq; + sandybridge_pcode_read(dev_priv, + GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq); + seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", + gpu_freq * GT_FREQUENCY_MULTIPLIER, + ((ia_freq >> 0) & 0xff) * 100, + ((ia_freq >> 8) & 0xff) * 100); + } + + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +static int i915_gfxec(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_opregion(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); + int ret; + + if (data == NULL) + return -ENOMEM; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + goto out; + + if (opregion->header) { + memcpy_fromio(data, opregion->header, OPREGION_SIZE); + seq_write(m, data, OPREGION_SIZE); + } + + mutex_unlock(&dev->struct_mutex); + +out: + kfree(data); + return 0; +} + +static int i915_gem_framebuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_fbdev *ifbdev; + struct intel_framebuffer *fb; + int ret; + + ret = mutex_lock_interruptible(&dev->mode_config.mutex); + if (ret) + return ret; + + ifbdev = dev_priv->fbdev; + fb = to_intel_framebuffer(ifbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel, + 
atomic_read(&fb->base.refcount.refcount)); + describe_obj(m, fb->obj); + seq_printf(m, "\n"); + mutex_unlock(&dev->mode_config.mutex); + + mutex_lock(&dev->mode_config.fb_lock); + list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { + if (&fb->base == ifbdev->helper.fb) + continue; + + seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel, + atomic_read(&fb->base.refcount.refcount)); + describe_obj(m, fb->obj); + seq_printf(m, "\n"); + } + mutex_unlock(&dev->mode_config.fb_lock); + + return 0; +} + +static int i915_context_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int ret, i; + + ret = mutex_lock_interruptible(&dev->mode_config.mutex); + if (ret) + return ret; + + if (dev_priv->ips.pwrctx) { + seq_printf(m, "power context "); + describe_obj(m, dev_priv->ips.pwrctx); + seq_printf(m, "\n"); + } + + if (dev_priv->ips.renderctx) { + seq_printf(m, "render context "); + describe_obj(m, dev_priv->ips.renderctx); + seq_printf(m, "\n"); + } + + for_each_ring(ring, dev_priv, i) { + if (ring->default_context) { + seq_printf(m, "HW default context %s ring ", ring->name); + describe_obj(m, ring->default_context->obj); + seq_printf(m, "\n"); + } + } + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} + +static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned forcewake_count; + + spin_lock_irq(&dev_priv->gt_lock); + forcewake_count = dev_priv->forcewake_count; + spin_unlock_irq(&dev_priv->gt_lock); + + seq_printf(m, "forcewake count = %u\n", forcewake_count); + + return 0; +} + +static const char *swizzle_string(unsigned swizzle) +{ + switch(swizzle) { + case I915_BIT_6_SWIZZLE_NONE: + return "none"; + case I915_BIT_6_SWIZZLE_9: + return "bit9"; + case I915_BIT_6_SWIZZLE_9_10: + return "bit9/bit10"; + case I915_BIT_6_SWIZZLE_9_11: + return "bit9/bit11"; + case I915_BIT_6_SWIZZLE_9_10_11: + return "bit9/bit10/bit11"; + case I915_BIT_6_SWIZZLE_9_17: + return "bit9/bit17"; + case I915_BIT_6_SWIZZLE_9_10_17: + return "bit9/bit10/bit17"; + case I915_BIT_6_SWIZZLE_UNKNOWN: + return "unknown"; + } + + return "bug"; +} + +static int i915_swizzle_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "bit6 swizzle for X-tiling = %s\n", + swizzle_string(dev_priv->mm.bit_6_swizzle_x)); + seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", + swizzle_string(dev_priv->mm.bit_6_swizzle_y)); + + if (IS_GEN3(dev) || IS_GEN4(dev)) { + seq_printf(m, "DDC = 0x%08x\n", + I915_READ(DCC)); + seq_printf(m, "C0DRB3 = 0x%04x\n", + I915_READ16(C0DRB3)); + seq_printf(m, "C1DRB3 = 0x%04x\n", + I915_READ16(C1DRB3)); + } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", + I915_READ(MAD_DIMM_C0)); + seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", + I915_READ(MAD_DIMM_C1)); + seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", + I915_READ(MAD_DIMM_C2)); + 
seq_printf(m, "TILECTL = 0x%08x\n", + I915_READ(TILECTL)); + seq_printf(m, "ARB_MODE = 0x%08x\n", + I915_READ(ARB_MODE)); + seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", + I915_READ(DISP_ARB_CTL)); + } + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_ppgtt_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i, ret; + + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + if (INTEL_INFO(dev)->gen == 6) + seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); + + for_each_ring(ring, dev_priv, i) { + seq_printf(m, "%s\n", ring->name); + if (INTEL_INFO(dev)->gen == 7) + seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); + seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); + seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); + seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); + } + if (dev_priv->mm.aliasing_ppgtt) { + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + + seq_printf(m, "aliasing PPGTT:\n"); + seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); + } + seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_dpio_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + + if (!IS_VALLEYVIEW(dev)) { + seq_printf(m, "unsupported\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev_priv->dpio_lock); + if (ret) + return ret; + + seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); + + seq_printf(m, "DPIO_DIV_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_A)); + seq_printf(m, "DPIO_DIV_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_DIV_B)); + + seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); + seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); + + seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); + seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); + + seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); + seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", + intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); + + seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", + intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); + + mutex_unlock(&dev_priv->dpio_lock); + + return 0; +} + +static int +i915_wedged_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + + *val = atomic_read(&dev_priv->gpu_error.reset_counter); + + return 0; +} + +static int +i915_wedged_set(void *data, u64 val) +{ + struct drm_device *dev = data; + + DRM_INFO("Manually setting wedged to %llu\n", val); + i915_handle_error(dev, val); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, + i915_wedged_get, i915_wedged_set, + "%llu\n"); + +static int +i915_ring_stop_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + + *val = dev_priv->gpu_error.stop_rings; + + return 0; +} + +static int 
+i915_ring_stop_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + dev_priv->gpu_error.stop_rings = val; + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, + i915_ring_stop_get, i915_ring_stop_set, + "0x%08llx\n"); + +#define DROP_UNBOUND 0x1 +#define DROP_BOUND 0x2 +#define DROP_RETIRE 0x4 +#define DROP_ACTIVE 0x8 +#define DROP_ALL (DROP_UNBOUND | \ + DROP_BOUND | \ + DROP_RETIRE | \ + DROP_ACTIVE) +static int +i915_drop_caches_get(void *data, u64 *val) +{ + *val = DROP_ALL; + + return 0; +} + +static int +i915_drop_caches_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj, *next; + int ret; + + DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); + + /* No need to check and wait for gpu resets, only libdrm auto-restarts + * on ioctls on -EAGAIN. */ + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (val & DROP_ACTIVE) { + ret = i915_gpu_idle(dev); + if (ret) + goto unlock; + } + + if (val & (DROP_RETIRE | DROP_ACTIVE)) + i915_gem_retire_requests(dev); + + if (val & DROP_BOUND) { + list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) + if (obj->pin_count == 0) { + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; + } + } + + if (val & DROP_UNBOUND) { + list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) + if (obj->pages_pin_count == 0) { + ret = i915_gem_object_put_pages(obj); + if (ret) + goto unlock; + } + } + +unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, + i915_drop_caches_get, i915_drop_caches_set, + "0x%08llx\n"); + +static int +i915_max_freq_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +static int +i915_max_freq_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + /* + * Turbo will still be enabled, but won't go above the set value. 
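+ * The value written here is in MHz; do_div() below converts it to RPS
+ * delay units (val / GT_FREQUENCY_MULTIPLIER) before it is stored in
+ * rps.max_delay and handed to gen6_set_rps().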
+ */ + do_div(val, GT_FREQUENCY_MULTIPLIER); + dev_priv->rps.max_delay = val; + gen6_set_rps(dev, val); + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, + i915_max_freq_get, i915_max_freq_set, + "%llu\n"); + +static int +i915_min_freq_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +static int +i915_min_freq_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + /* + * Turbo will still be enabled, but won't go below the set value. + */ + do_div(val, GT_FREQUENCY_MULTIPLIER); + dev_priv->rps.min_delay = val; + gen6_set_rps(dev, val); + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, + i915_min_freq_get, i915_min_freq_set, + "%llu\n"); + +static int +i915_cache_sharing_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 snpcr; + int ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + mutex_unlock(&dev_priv->dev->struct_mutex); + + *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; + + return 0; +} + +static int +i915_cache_sharing_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 snpcr; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + if (val > 3) + return -EINVAL; + + DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); + + /* Update the cache sharing policy here as well */ + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, + i915_cache_sharing_get, i915_cache_sharing_set, + "%llu\n"); + +/* As the drm_debugfs_init() routines are called before dev->dev_private is + * allocated we need to hook into the minor for release. 
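+ * drm_add_fake_info_node() records the dentry on the minor's debugfs
+ * list, keyed by the fops pointer, so that i915_debugfs_cleanup() can
+ * drop it again via drm_debugfs_remove_files().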
*/ +static int +drm_add_fake_info_node(struct drm_minor *minor, + struct dentry *ent, + const void *key) +{ + struct drm_info_node *node; + + node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); + if (node == NULL) { + debugfs_remove(ent); + return -ENOMEM; + } + + node->minor = minor; + node->dent = ent; + node->info_ent = (void *) key; + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); + + return 0; +} + +static int i915_forcewake_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen < 6) + return 0; + + gen6_gt_force_wake_get(dev_priv); + + return 0; +} + +static int i915_forcewake_release(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen < 6) + return 0; + + gen6_gt_force_wake_put(dev_priv); + + return 0; +} + +static const struct file_operations i915_forcewake_fops = { + .owner = THIS_MODULE, + .open = i915_forcewake_open, + .release = i915_forcewake_release, +}; + +static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + + ent = debugfs_create_file("i915_forcewake_user", + S_IRUSR, + root, dev, + &i915_forcewake_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); +} + +static int i915_debugfs_create(struct dentry *root, + struct drm_minor *minor, + const char *name, + const struct file_operations *fops) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + + ent = debugfs_create_file(name, + S_IRUGO | S_IWUSR, + root, dev, + fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return drm_add_fake_info_node(minor, ent, fops); +} + +static struct drm_info_list i915_debugfs_list[] = { + {"i915_capabilities", i915_capabilities, 0}, + {"i915_gem_objects", i915_gem_object_info, 0}, + {"i915_gem_gtt", i915_gem_gtt_info, 0}, + {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, + {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, + {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, + {"i915_gem_request", i915_gem_request_info, 0}, + {"i915_gem_seqno", i915_gem_seqno_info, 0}, + {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, + {"i915_gem_interrupt", i915_interrupt_info, 0}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, + {"i915_rstdby_delays", i915_rstdby_delays, 0}, + {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, + {"i915_delayfreq_table", i915_delayfreq_table, 0}, + {"i915_inttoext_table", i915_inttoext_table, 0}, + {"i915_drpc_info", i915_drpc_info, 0}, + {"i915_emon_status", i915_emon_status, 0}, + {"i915_ring_freq_table", i915_ring_freq_table, 0}, + {"i915_gfxec", i915_gfxec, 0}, + {"i915_fbc_status", i915_fbc_status, 0}, + {"i915_sr_status", i915_sr_status, 0}, + {"i915_opregion", i915_opregion, 0}, + {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, + {"i915_context_status", i915_context_status, 0}, + {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, + {"i915_swizzle_info", i915_swizzle_info, 0}, + {"i915_ppgtt_info", i915_ppgtt_info, 0}, + 
{"i915_dpio", i915_dpio_info, 0}, +}; +#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) + +int i915_debugfs_init(struct drm_minor *minor) +{ + int ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_wedged", + &i915_wedged_fops); + if (ret) + return ret; + + ret = i915_forcewake_create(minor->debugfs_root, minor); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_max_freq", + &i915_max_freq_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_min_freq", + &i915_min_freq_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_cache_sharing", + &i915_cache_sharing_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_ring_stop", + &i915_ring_stop_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_gem_drop_caches", + &i915_drop_caches_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_error_state", + &i915_error_state_fops); + if (ret) + return ret; + + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_next_seqno", + &i915_next_seqno_fops); + if (ret) + return ret; + + return drm_debugfs_create_files(i915_debugfs_list, + I915_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); +} + +void i915_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(i915_debugfs_list, + I915_DEBUGFS_ENTRIES, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops, + 1, minor); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c new file mode 100644 index 0000000..17d9b0b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -0,0 +1,1922 @@ +/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- + */ +/* + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) + +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + +/** + * Lock test for when it's just for synchronization of ring access. + * + * In that case, we don't need to do it when GEM is initialized as nobody else + * has access to the ring. + */ +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ + if (LP_RING(dev->dev_private)->obj == NULL) \ + LOCK_TEST_WITH_RETURN(dev, file); \ +} while (0) + +static inline u32 +intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) +{ + if (I915_NEED_GFX_HWS(dev_priv->dev)) + return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg); + else + return intel_read_status_page(LP_RING(dev_priv), reg); +} + +#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg) +#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) +#define I915_BREADCRUMB_INDEX 0x21 + +void i915_update_dri1_breadcrumb(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; + + if (dev->primary->master) { + master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + } +} + +static void i915_write_hws_pga(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 addr; + + addr = dev_priv->status_page_dmah->busaddr; + if (INTEL_INFO(dev)->gen >= 4) + addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); +} + +/** + * Frees the hardware status page, whether it's a physical address or a virtual + * address set up by the X Server. 
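+ * The DMA-allocated page (status_page_dmah) and the GTT mapping
+ * (dri1.gfx_hws_cpu_addr) are each released if present, and HWS_PGA is
+ * rewritten so the hardware no longer points at freed memory.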
+ */ +static void i915_free_hws(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + + if (dev_priv->status_page_dmah) { + drm_pci_free(dev, dev_priv->status_page_dmah); + dev_priv->status_page_dmah = NULL; + } + + if (ring->status_page.gfx_addr) { + ring->status_page.gfx_addr = 0; + iounmap(dev_priv->dri1.gfx_hws_cpu_addr); + } + + /* Need to rewrite hardware status page */ + I915_WRITE(HWS_PGA, 0x1ffff000); +} + +void i915_kernel_lost_context(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + + /* + * We should never lose context on the ring with modesetting + * as we don't expose it to userspace + */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE); + if (ring->space < 0) + ring->space += ring->size; + + if (!dev->primary->master) + return; + + master_priv = dev->primary->master->driver_priv; + if (ring->head == ring->tail && master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; +} + +static int i915_dma_cleanup(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + /* Make sure interrupts are disabled here because the uninstall ioctl + * may not have been called from userspace and after dev_private + * is freed, it's too late. + */ + if (dev->irq_enabled) + drm_irq_uninstall(dev); + + mutex_lock(&dev->struct_mutex); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); + mutex_unlock(&dev->struct_mutex); + + /* Clear the HWS virtual address at teardown */ + if (I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); + + return 0; +} + +static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret; + + master_priv->sarea = drm_getsarea(dev); + if (master_priv->sarea) { + master_priv->sarea_priv = (drm_i915_sarea_t *) + ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); + } else { + DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); + } + + if (init->ring_size != 0) { + if (LP_RING(dev_priv)->obj != NULL) { + i915_dma_cleanup(dev); + DRM_ERROR("Client tried to initialize ringbuffer in " + "GEM mode\n"); + return -EINVAL; + } + + ret = intel_render_ring_init_dri(dev, + init->ring_start, + init->ring_size); + if (ret) { + i915_dma_cleanup(dev); + return ret; + } + } + + dev_priv->dri1.cpp = init->cpp; + dev_priv->dri1.back_offset = init->back_offset; + dev_priv->dri1.front_offset = init->front_offset; + dev_priv->dri1.current_page = 0; + if (master_priv->sarea_priv) + master_priv->sarea_priv->pf_current_page = 0; + + /* Allow hardware batchbuffers unless told otherwise. 
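+ * i915_batchbuffer() rejects the ioctl with -EINVAL when this flag has
+ * been cleared through I915_SETPARAM_ALLOW_BATCHBUFFER.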
+ */ + dev_priv->dri1.allow_batchbuffer = 1; + + return 0; +} + +static int i915_dma_resume(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + + DRM_DEBUG_DRIVER("%s\n", __func__); + + if (ring->virtual_start == NULL) { + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + /* Program Hardware Status Page */ + if (!ring->status_page.page_addr) { + DRM_ERROR("Can not find hardware status page\n"); + return -EINVAL; + } + DRM_DEBUG_DRIVER("hw status page @ %p\n", + ring->status_page.page_addr); + if (ring->status_page.gfx_addr != 0) + intel_ring_setup_status_page(ring); + else + i915_write_hws_pga(dev); + + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); + + return 0; +} + +static int i915_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_init_t *init = data; + int retcode = 0; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + switch (init->func) { + case I915_INIT_DMA: + retcode = i915_initialize(dev, init); + break; + case I915_CLEANUP_DMA: + retcode = i915_dma_cleanup(dev); + break; + case I915_RESUME_DMA: + retcode = i915_dma_resume(dev); + break; + default: + retcode = -EINVAL; + break; + } + + return retcode; +} + +/* Implement basically the same security restrictions as hardware does + * for MI_BATCH_NON_SECURE. These can be made stricter at any time. + * + * Most of the calculations below involve calculating the size of a + * particular instruction. It's important to get the size right as + * that tells us where the next instruction to check is. Any illegal + * instruction detected will be given a size of zero, which is a + * signal to abort the rest of the buffer. 
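+ * For example, a 2D command (client field 0x2) encodes its dword length
+ * minus two in the low byte, so validate_cmd() returns (cmd & 0xff) + 2
+ * and i915_emit_cmds() advances by that many dwords to the next command.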
+ */ +static int validate_cmd(int cmd) +{ + switch (((cmd >> 29) & 0x7)) { + case 0x0: + switch ((cmd >> 23) & 0x3f) { + case 0x0: + return 1; /* MI_NOOP */ + case 0x4: + return 1; /* MI_FLUSH */ + default: + return 0; /* disallow everything else */ + } + break; + case 0x1: + return 0; /* reserved */ + case 0x2: + return (cmd & 0xff) + 2; /* 2d commands */ + case 0x3: + if (((cmd >> 24) & 0x1f) <= 0x18) + return 1; + + switch ((cmd >> 24) & 0x1f) { + case 0x1c: + return 1; + case 0x1d: + switch ((cmd >> 16) & 0xff) { + case 0x3: + return (cmd & 0x1f) + 2; + case 0x4: + return (cmd & 0xf) + 2; + default: + return (cmd & 0xffff) + 2; + } + case 0x1e: + if (cmd & (1 << 23)) + return (cmd & 0xffff) + 1; + else + return 1; + case 0x1f: + if ((cmd & (1 << 23)) == 0) /* inline vertices */ + return (cmd & 0x1ffff) + 2; + else if (cmd & (1 << 17)) /* indirect random */ + if ((cmd & 0xffff) == 0) + return 0; /* unknown length, too hard */ + else + return (((cmd & 0xffff) + 1) / 2) + 1; + else + return 2; /* indirect sequential */ + default: + return 0; + } + default: + return 0; + } + + return 0; +} + +static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i, ret; + + if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) + return -EINVAL; + + for (i = 0; i < dwords;) { + int sz = validate_cmd(buffer[i]); + if (sz == 0 || i + sz > dwords) + return -EINVAL; + i += sz; + } + + ret = BEGIN_LP_RING((dwords+1)&~1); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + OUT_RING(buffer[i]); + if (dwords & 1) + OUT_RING(0); + + ADVANCE_LP_RING(); + + return 0; +} + +int +i915_emit_box(struct drm_device *dev, + struct drm_clip_rect *box, + int DR1, int DR4) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (box->y2 <= box->y1 || box->x2 <= box->x1 || + box->y2 <= 0 || box->x2 <= 0) { + DRM_ERROR("Bad box %d,%d..%d,%d\n", + box->x1, box->y1, box->x2, box->y2); + return -EINVAL; + } + + if (INTEL_INFO(dev)->gen >= 4) { + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + + OUT_RING(GFX_OP_DRAWRECT_INFO_I965); + OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); + OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); + OUT_RING(DR4); + } else { + ret = BEGIN_LP_RING(6); + if (ret) + return ret; + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(DR1); + OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); + OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); + OUT_RING(DR4); + OUT_RING(0); + } + ADVANCE_LP_RING(); + + return 0; +} + +/* XXX: Emitting the counter should really be moved to part of the IRQ + * emit. 
For now, do it in both places: + */ + +static void i915_emit_breadcrumb(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + + dev_priv->dri1.counter++; + if (dev_priv->dri1.counter > 0x7FFFFFFFUL) + dev_priv->dri1.counter = 0; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->dri1.counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } +} + +static int i915_dispatch_cmdbuffer(struct drm_device * dev, + drm_i915_cmdbuffer_t *cmd, + struct drm_clip_rect *cliprects, + void *cmdbuf) +{ + int nbox = cmd->num_cliprects; + int i = 0, count, ret; + + if (cmd->sz & 0x3) { + DRM_ERROR("alignment"); + return -EINVAL; + } + + i915_kernel_lost_context(dev); + + count = nbox ? nbox : 1; + + for (i = 0; i < count; i++) { + if (i < nbox) { + ret = i915_emit_box(dev, &cliprects[i], + cmd->DR1, cmd->DR4); + if (ret) + return ret; + } + + ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); + if (ret) + return ret; + } + + i915_emit_breadcrumb(dev); + return 0; +} + +static int i915_dispatch_batchbuffer(struct drm_device * dev, + drm_i915_batchbuffer_t * batch, + struct drm_clip_rect *cliprects) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int nbox = batch->num_cliprects; + int i, count, ret; + + if ((batch->start | batch->used) & 0x7) { + DRM_ERROR("alignment"); + return -EINVAL; + } + + i915_kernel_lost_context(dev); + + count = nbox ? nbox : 1; + for (i = 0; i < count; i++) { + if (i < nbox) { + ret = i915_emit_box(dev, &cliprects[i], + batch->DR1, batch->DR4); + if (ret) + return ret; + } + + if (!IS_I830(dev) && !IS_845G(dev)) { + ret = BEGIN_LP_RING(2); + if (ret) + return ret; + + if (INTEL_INFO(dev)->gen >= 4) { + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); + OUT_RING(batch->start); + } else { + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); + OUT_RING(batch->start | MI_BATCH_NON_SECURE); + } + } else { + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + + OUT_RING(MI_BATCH_BUFFER); + OUT_RING(batch->start | MI_BATCH_NON_SECURE); + OUT_RING(batch->start + batch->used - 4); + OUT_RING(0); + } + ADVANCE_LP_RING(); + } + + + if (IS_G4X(dev) || IS_GEN5(dev)) { + if (BEGIN_LP_RING(2) == 0) { + OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + } + } + + i915_emit_breadcrumb(dev); + return 0; +} + +static int i915_dispatch_flip(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = + dev->primary->master->driver_priv; + int ret; + + if (!master_priv->sarea_priv) + return -EINVAL; + + DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", + __func__, + dev_priv->dri1.current_page, + master_priv->sarea_priv->pf_current_page); + + i915_kernel_lost_context(dev); + + ret = BEGIN_LP_RING(10); + if (ret) + return ret; + + OUT_RING(MI_FLUSH | MI_READ_FLUSH); + OUT_RING(0); + + OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); + OUT_RING(0); + if (dev_priv->dri1.current_page == 0) { + OUT_RING(dev_priv->dri1.back_offset); + dev_priv->dri1.current_page = 1; + } else { + OUT_RING(dev_priv->dri1.front_offset); + dev_priv->dri1.current_page = 0; + } + OUT_RING(0); + + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); + OUT_RING(0); + + ADVANCE_LP_RING(); + + 
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; + + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->dri1.counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } + + master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; + return 0; +} + +static int i915_quiescent(struct drm_device *dev) +{ + i915_kernel_lost_context(dev); + return intel_ring_idle(LP_RING(dev->dev_private)); +} + +static int i915_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + ret = i915_quiescent(dev); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int i915_batchbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) + master_priv->sarea_priv; + drm_i915_batchbuffer_t *batch = data; + int ret; + struct drm_clip_rect *cliprects = NULL; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv->dri1.allow_batchbuffer) { + DRM_ERROR("Batchbuffer ioctl disabled\n"); + return -EINVAL; + } + + DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", + batch->start, batch->used, batch->num_cliprects); + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (batch->num_cliprects < 0) + return -EINVAL; + + if (batch->num_cliprects) { + cliprects = kcalloc(batch->num_cliprects, + sizeof(struct drm_clip_rect), + GFP_KERNEL); + if (cliprects == NULL) + return -ENOMEM; + + ret = copy_from_user(cliprects, batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect)); + if (ret != 0) { + ret = -EFAULT; + goto fail_free; + } + } + + mutex_lock(&dev->struct_mutex); + ret = i915_dispatch_batchbuffer(dev, batch, cliprects); + mutex_unlock(&dev->struct_mutex); + + if (sarea_priv) + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + +fail_free: + kfree(cliprects); + + return ret; +} + +static int i915_cmdbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) + master_priv->sarea_priv; + drm_i915_cmdbuffer_t *cmdbuf = data; + struct drm_clip_rect *cliprects = NULL; + void *batch_data; + int ret; + + DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", + cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + if (cmdbuf->num_cliprects < 0) + return -EINVAL; + + batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); + if (batch_data == NULL) + return -ENOMEM; + + ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); + if (ret != 0) { + ret = -EFAULT; + goto fail_batch_free; + } + + if (cmdbuf->num_cliprects) { + cliprects = kcalloc(cmdbuf->num_cliprects, + sizeof(struct drm_clip_rect), GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto fail_batch_free; + } + + ret = copy_from_user(cliprects, cmdbuf->cliprects, + cmdbuf->num_cliprects * + sizeof(struct 
drm_clip_rect)); + if (ret != 0) { + ret = -EFAULT; + goto fail_clip_free; + } + } + + mutex_lock(&dev->struct_mutex); + ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); + mutex_unlock(&dev->struct_mutex); + if (ret) { + DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); + goto fail_clip_free; + } + + if (sarea_priv) + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + +fail_clip_free: + kfree(cliprects); +fail_batch_free: + kfree(batch_data); + + return ret; +} + +static int i915_emit_irq(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + + i915_kernel_lost_context(dev); + + DRM_DEBUG_DRIVER("\n"); + + dev_priv->dri1.counter++; + if (dev_priv->dri1.counter > 0x7FFFFFFFUL) + dev_priv->dri1.counter = 1; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->dri1.counter); + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } + + return dev_priv->dri1.counter; +} + +static int i915_wait_irq(struct drm_device * dev, int irq_nr) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret = 0; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + + DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, + READ_BREADCRUMB(dev_priv)); + + if (READ_BREADCRUMB(dev_priv) >= irq_nr) { + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + return 0; + } + + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + + if (ring->irq_get(ring)) { + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); + ring->irq_put(ring); + } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) + ret = -EBUSY; + + if (ret == -EBUSY) { + DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", + READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); + } + + return ret; +} + +/* Needs the lock as it touches the ring. + */ +static int i915_irq_emit(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_irq_emit_t *emit = data; + int result; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + result = i915_emit_irq(dev); + mutex_unlock(&dev->struct_mutex); + + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { + DRM_ERROR("copy_to_user\n"); + return -EFAULT; + } + + return 0; +} + +/* Doesn't need the hardware lock. 
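+ * Unlike i915_irq_emit(), i915_irq_wait() only reads the breadcrumb from
+ * the status page and sleeps on the ring's irq_queue, so it skips
+ * RING_LOCK_TEST_WITH_RETURN.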
+ */ +static int i915_irq_wait(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_irq_wait_t *irqwait = data; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + return i915_wait_irq(dev, irqwait->irq_seq); +} + +static int i915_vblank_pipe_get(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_vblank_pipe_t *pipe = data; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + + return 0; +} + +/** + * Schedule buffer swap at given vertical blank. + */ +static int i915_vblank_swap(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + /* The delayed swap mechanism was fundamentally racy, and has been + * removed. The model was that the client requested a delayed flip/swap + * from the kernel, then waited for vblank before continuing to perform + * rendering. The problem was that the kernel might wake the client + * up before it dispatched the vblank swap (since the lock has to be + * held while touching the ringbuffer), in which case the client would + * clear and start the next frame before the swap occurred, and + * flicker would occur in addition to likely missing the vblank. + * + * In the absence of this ioctl, userland falls back to a correct path + * of waiting for a vblank, then dispatching the swap on its own. + * Context switching to userland and back is plenty fast enough for + * meeting the requirements of vblank swapping. + */ + return -EINVAL; +} + +static int i915_flip_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + DRM_DEBUG_DRIVER("%s\n", __func__); + + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + ret = i915_dispatch_flip(dev); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int i915_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_getparam_t *param = data; + int value; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + switch (param->param) { + case I915_PARAM_IRQ_ACTIVE: + value = dev->pdev->irq ? 1 : 0; + break; + case I915_PARAM_ALLOW_BATCHBUFFER: + value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; + break; + case I915_PARAM_LAST_DISPATCH: + value = READ_BREADCRUMB(dev_priv); + break; + case I915_PARAM_CHIPSET_ID: + value = dev->pci_device; + break; + case I915_PARAM_HAS_GEM: + value = 1; + break; + case I915_PARAM_NUM_FENCES_AVAIL: + value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; + break; + case I915_PARAM_HAS_OVERLAY: + value = dev_priv->overlay ? 
1 : 0; + break; + case I915_PARAM_HAS_PAGEFLIPPING: + value = 1; + break; + case I915_PARAM_HAS_EXECBUF2: + /* depends on GEM */ + value = 1; + break; + case I915_PARAM_HAS_BSD: + value = intel_ring_initialized(&dev_priv->ring[VCS]); + break; + case I915_PARAM_HAS_BLT: + value = intel_ring_initialized(&dev_priv->ring[BCS]); + break; + case I915_PARAM_HAS_RELAXED_FENCING: + value = 1; + break; + case I915_PARAM_HAS_COHERENT_RINGS: + value = 1; + break; + case I915_PARAM_HAS_EXEC_CONSTANTS: + value = INTEL_INFO(dev)->gen >= 4; + break; + case I915_PARAM_HAS_RELAXED_DELTA: + value = 1; + break; + case I915_PARAM_HAS_GEN7_SOL_RESET: + value = 1; + break; + case I915_PARAM_HAS_LLC: + value = HAS_LLC(dev); + break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; + break; + case I915_PARAM_HAS_WAIT_TIMEOUT: + value = 1; + break; + case I915_PARAM_HAS_SEMAPHORES: + value = i915_semaphore_is_enabled(dev); + break; + case I915_PARAM_HAS_PRIME_VMAP_FLUSH: + value = 1; + break; + case I915_PARAM_HAS_SECURE_BATCHES: + value = capable(CAP_SYS_ADMIN); + break; + case I915_PARAM_HAS_PINNED_BATCHES: + value = 1; + break; + case I915_PARAM_HAS_EXEC_NO_RELOC: + value = 1; + break; + case I915_PARAM_HAS_EXEC_HANDLE_LUT: + value = 1; + break; + default: + DRM_DEBUG_DRIVER("Unknown parameter %d\n", + param->param); + return -EINVAL; + } + + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { + DRM_ERROR("DRM_COPY_TO_USER failed\n"); + return -EFAULT; + } + + return 0; +} + +static int i915_setparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_setparam_t *param = data; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + switch (param->param) { + case I915_SETPARAM_USE_MI_BATCHBUFFER_START: + break; + case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: + break; + case I915_SETPARAM_ALLOW_BATCHBUFFER: + dev_priv->dri1.allow_batchbuffer = param->value ? 
1 : 0; + break; + case I915_SETPARAM_NUM_USED_FENCES: + if (param->value > dev_priv->num_fence_regs || + param->value < 0) + return -EINVAL; + /* Userspace can use first N regs */ + dev_priv->fence_reg_start = param->value; + break; + default: + DRM_DEBUG_DRIVER("unknown parameter %d\n", + param->param); + return -EINVAL; + } + + return 0; +} + +static int i915_set_status_page(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_hws_addr_t *hws = data; + struct intel_ring_buffer *ring; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (!I915_NEED_GFX_HWS(dev)) + return -EINVAL; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + WARN(1, "tried to set status page when mode setting active\n"); + return 0; + } + + DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); + + ring = LP_RING(dev_priv); + ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); + + dev_priv->dri1.gfx_hws_cpu_addr = + ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096); + if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { + i915_dma_cleanup(dev); + ring->status_page.gfx_addr = 0; + DRM_ERROR("can not ioremap virtual address for" + " G33 hw status page\n"); + return -ENOMEM; + } + + memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); + + DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", + ring->status_page.gfx_addr); + DRM_DEBUG_DRIVER("load hws at %p\n", + ring->status_page.page_addr); + return 0; +} + +static int i915_get_bridge_dev(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!dev_priv->bridge_dev) { + DRM_ERROR("bridge device not found\n"); + return -1; + } + return 0; +} + +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4*4096) + +#define DEVEN_REG 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret; + + if (INTEL_INFO(dev)->gen >= 4) + pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; +#endif + + /* Get some space for it */ + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + dev_priv->bridge_dev); + if (ret) { + DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + return ret; + } + + if (INTEL_INFO(dev)->gen >= 4) + pci_write_config_dword(dev_priv->bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(dev_priv->bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); + return 0; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static void +intel_setup_mchbar(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool enabled; + + dev_priv->mchbar_need_disable = false; + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + return; + + if (intel_alloc_mchbar_resource(dev)) + return; + + dev_priv->mchbar_need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); + } +} + +static void +intel_teardown_mchbar(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp; + + if (dev_priv->mchbar_need_disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + temp &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + temp &= ~1; + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + +/* true = enable decode, false = disable decoder */ +static unsigned int i915_vga_set_decode(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + intel_modeset_vga_set_state(dev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + pr_info("switched on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; + } else { + pr_err("switched off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend(dev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + bool can_switch; + + spin_lock(&dev->count_lock); + can_switch = (dev->open_count == 0); + spin_unlock(&dev->count_lock); + return can_switch; +} + +static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { + .set_gpu_state = i915_switcheroo_set_state, + .reprobe = NULL, + .can_switch = i915_switcheroo_can_switch, +}; + +static int i915_load_modeset_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = intel_parse_bios(dev); + if (ret) + DRM_INFO("failed to find VBIOS tables\n"); + + /* If we have > 1 VGA cards, then we need to arbitrate access + * to the common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. + */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; + + intel_register_dsm_handler(); + + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); + if (ret) + goto cleanup_vga_client; + + /* Initialise stolen first so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(dev); + if (ret) + goto cleanup_vga_switcheroo; + + ret = drm_irq_install(dev); + if (ret) + goto cleanup_gem_stolen; + + /* Important: The output setup functions called by modeset_init need + * working irqs for e.g. gmbus and dp aux transfers. */ + intel_modeset_init(dev); + + ret = i915_gem_init(dev); + if (ret) + goto cleanup_irq; + + INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); + + intel_modeset_gem_init(dev); + + /* Always safe in the mode setting case. 
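+ * Setting vblank_disable_allowed lets the DRM core turn vblank
+ * interrupts back off once no one is waiting on them.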
*/ + /* FIXME: do pre/post-mode set stuff in core KMS code */ + dev->vblank_disable_allowed = 1; + if (INTEL_INFO(dev)->num_pipes == 0) { + dev_priv->mm.suspended = 0; + return 0; + } + + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_gem; + + /* Only enable hotplug handling once the fbdev is fully set up. */ + intel_hpd_init(dev); + + /* + * Some ports require correctly set-up hpd registers for detection to + * work properly (leading to ghost connected connector status), e.g. VGA + * on gm45. Hence we can only set up the initial fbdev config after hpd + * irqs are fully enabled. Now we should scan for the initial config + * only once hotplug handling is enabled, but due to screwed-up locking + * around kms/fbdev init we can't protect the fdbev initial config + * scanning against hotplug events. Hence do this first and ignore the + * tiny window where we will loose hotplug notifactions. + */ + intel_fbdev_initial_config(dev); + + /* Only enable hotplug handling once the fbdev is fully set up. */ + dev_priv->enable_hotplug_processing = true; + + drm_kms_helper_poll_init(dev); + + /* We're off and running w/KMS */ + dev_priv->mm.suspended = 0; + + return 0; + +cleanup_gem: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); + i915_gem_cleanup_aliasing_ppgtt(dev); +cleanup_irq: + drm_irq_uninstall(dev); +cleanup_gem_stolen: + i915_gem_cleanup_stolen(dev); +cleanup_vga_switcheroo: + vga_switcheroo_unregister_client(dev->pdev); +cleanup_vga_client: + vga_client_register(dev->pdev, NULL, NULL, NULL); +out: + return ret; +} + +int i915_master_create(struct drm_device *dev, struct drm_master *master) +{ + struct drm_i915_master_private *master_priv; + + master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); + if (!master_priv) + return -ENOMEM; + + master->driver_priv = master_priv; + return 0; +} + +void i915_master_destroy(struct drm_device *dev, struct drm_master *master) +{ + struct drm_i915_master_private *master_priv = master->driver_priv; + + if (!master_priv) + return; + + kfree(master_priv); + + master->driver_priv = NULL; +} + +static void +i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, + unsigned long size) +{ + dev_priv->mm.gtt_mtrr = -1; + +#if defined(CONFIG_X86_PAT) + if (cpu_has_pat) + return; +#endif + + /* Set up a WC MTRR for non-PAT systems. This is more common than + * one would think, because the kernel disables PAT on first + * generation Core chips because WC PAT gets overridden by a UC + * MTRR if present. Even if a UC MTRR isn't present. + */ + dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); + if (dev_priv->mm.gtt_mtrr < 0) { + DRM_INFO("MTRR allocation failed. Graphics " + "performance may suffer.\n"); + } +} + +static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->dev->pdev; + bool primary; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = dev_priv->gtt.mappable_base; + ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start; + + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); +} + +static void i915_dump_device_info(struct drm_i915_private *dev_priv) +{ + const struct intel_device_info *info = dev_priv->info; + +#define DEV_INFO_FLAG(name) info->name ? 
#name "," : "" +#define DEV_INFO_SEP , + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + info->gen, + dev_priv->dev->pdev->device, + DEV_INFO_FLAGS); +#undef DEV_INFO_FLAG +#undef DEV_INFO_SEP +} + +/** + * intel_early_sanitize_regs - clean up BIOS state + * @dev: DRM device + * + * This function must be called before we do any I915_READ or I915_WRITE. Its + * purpose is to clean up any state left by the BIOS that may affect us when + * reading and/or writing registers. + */ +static void intel_early_sanitize_regs(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_HASWELL(dev)) + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +} + +/** + * i915_driver_load - setup chip and create an initial config + * @dev: DRM device + * @flags: startup flags + * + * The driver load routine has to do several things: + * - drive output discovery via intel_modeset_init() + * - initialize the memory manager + * - allocate initial config memory + * - setup the DRM framebuffer with the allocated memory + */ +int i915_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct drm_i915_private *dev_priv; + struct intel_device_info *info; + int ret = 0, mmio_bar, mmio_size; + uint32_t aperture_size; + + info = (struct intel_device_info *) flags; + + /* Refuse to load on gen6+ without kms enabled. */ + if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + /* i915 has 4 more counters */ + dev->counters += 4; + dev->types[6] = _DRM_STAT_IRQ; + dev->types[7] = _DRM_STAT_PRIMARY; + dev->types[8] = _DRM_STAT_SECONDARY; + dev->types[9] = _DRM_STAT_DMA; + + dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + + dev->dev_private = (void *)dev_priv; + dev_priv->dev = dev; + dev_priv->info = info; + + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->gpu_error.lock); + spin_lock_init(&dev_priv->rps.lock); + spin_lock_init(&dev_priv->gt_lock); + mutex_init(&dev_priv->dpio_lock); + mutex_init(&dev_priv->rps.hw_lock); + mutex_init(&dev_priv->modeset_restore_lock); + + i915_dump_device_info(dev_priv); + + if (i915_get_bridge_dev(dev)) { + ret = -EIO; + goto free_priv; + } + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (info->gen < 5) + mmio_size = 512*1024; + else + mmio_size = 2*1024*1024; + + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + if (!dev_priv->regs) { + DRM_ERROR("failed to map registers\n"); + ret = -EIO; + goto put_bridge; + } + + intel_early_sanitize_regs(dev); + + ret = i915_gem_gtt_init(dev); + if (ret) + goto put_bridge; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kick_out_firmware_fb(dev_priv); + + pci_set_master(dev->pdev); + + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. 
+ * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + aperture_size = dev_priv->gtt.mappable_end; + + dev_priv->gtt.mappable = + io_mapping_create_wc(dev_priv->gtt.mappable_base, + aperture_size); + if (dev_priv->gtt.mappable == NULL) { + ret = -EIO; + goto out_rmmap; + } + + i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base, + aperture_size); + + /* The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. + */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) { + DRM_ERROR("Failed to create our workqueue.\n"); + ret = -ENOMEM; + goto out_mtrrfree; + } + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(dev); + + intel_irq_init(dev); + intel_pm_init(dev); + intel_gt_sanitize(dev); + intel_gt_init(dev); + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + intel_setup_gmbus(dev); + intel_opregion_setup(dev); + + intel_setup_bios(dev); + + i915_gem_load(dev); + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there + * according to the published specs. It doesn't appear to function + * correctly in testing on 945G. + * This may be a side effect of MSI having been made available for PEG + * and the registers being closely associated. + * + * According to chipset errata, on the 965GM, MSI interrupts may + * be lost or delayed, but we use them anyways to avoid + * stuck interrupts on some machines. 
+ */ + if (!IS_I945G(dev) && !IS_I945GM(dev)) + pci_enable_msi(dev->pdev); + + dev_priv->num_plane = 1; + if (IS_VALLEYVIEW(dev)) + dev_priv->num_plane = 2; + + if (INTEL_INFO(dev)->num_pipes) { + ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); + if (ret) + goto out_gem_unload; + } + + /* Start out suspended */ + dev_priv->mm.suspended = 1; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = i915_load_modeset_init(dev); + if (ret < 0) { + DRM_ERROR("failed to init modeset\n"); + goto out_gem_unload; + } + } + + i915_setup_sysfs(dev); + + if (INTEL_INFO(dev)->num_pipes) { + /* Must be done after probing outputs */ + intel_opregion_init(dev); + acpi_video_register(); + } + + if (IS_GEN5(dev)) + intel_gpu_ips_init(dev_priv); + + return 0; + +out_gem_unload: + if (dev_priv->mm.inactive_shrinker.shrink) + unregister_shrinker(&dev_priv->mm.inactive_shrinker); + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + intel_teardown_gmbus(dev); + intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->wq); +out_mtrrfree: + if (dev_priv->mm.gtt_mtrr >= 0) { + mtrr_del(dev_priv->mm.gtt_mtrr, + dev_priv->gtt.mappable_base, + aperture_size); + dev_priv->mm.gtt_mtrr = -1; + } + io_mapping_free(dev_priv->gtt.mappable); + dev_priv->gtt.gtt_remove(dev); +out_rmmap: + pci_iounmap(dev->pdev, dev_priv->regs); +put_bridge: + pci_dev_put(dev_priv->bridge_dev); +free_priv: + kfree(dev_priv); + return ret; +} + +int i915_driver_unload(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + intel_gpu_ips_teardown(); + + i915_teardown_sysfs(dev); + + if (dev_priv->mm.inactive_shrinker.shrink) + unregister_shrinker(&dev_priv->mm.inactive_shrinker); + + mutex_lock(&dev->struct_mutex); + ret = i915_gpu_idle(dev); + if (ret) + DRM_ERROR("failed to idle hardware: %d\n", ret); + i915_gem_retire_requests(dev); + mutex_unlock(&dev->struct_mutex); + + /* Cancel the retire work handler, which should be idle now. */ + cancel_delayed_work_sync(&dev_priv->mm.retire_work); + + io_mapping_free(dev_priv->gtt.mappable); + if (dev_priv->mm.gtt_mtrr >= 0) { + mtrr_del(dev_priv->mm.gtt_mtrr, + dev_priv->gtt.mappable_base, + dev_priv->gtt.mappable_end); + dev_priv->mm.gtt_mtrr = -1; + } + + acpi_video_unregister(); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_fbdev_fini(dev); + intel_modeset_cleanup(dev); + cancel_work_sync(&dev_priv->console_resume_work); + + /* + * free the memory space allocated for the child device + * config parsed from VBT + */ + if (dev_priv->child_dev && dev_priv->child_dev_num) { + kfree(dev_priv->child_dev); + dev_priv->child_dev = NULL; + dev_priv->child_dev_num = 0; + } + + vga_switcheroo_unregister_client(dev->pdev); + vga_client_register(dev->pdev, NULL, NULL, NULL); + } + + /* Free error state after interrupts are fully disabled. */ + del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); + cancel_work_sync(&dev_priv->gpu_error.work); + i915_destroy_error_state(dev); + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + intel_opregion_fini(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Flush any outstanding unpin_work. 
*/ + flush_workqueue(dev_priv->wq); + + mutex_lock(&dev->struct_mutex); + i915_gem_free_all_phys_object(dev); + i915_gem_cleanup_ringbuffer(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + i915_gem_cleanup_aliasing_ppgtt(dev); + i915_gem_cleanup_stolen(dev); + + if (!I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); + } + + if (dev_priv->regs != NULL) + pci_iounmap(dev->pdev, dev_priv->regs); + + intel_teardown_gmbus(dev); + intel_teardown_mchbar(dev); + + destroy_workqueue(dev_priv->wq); + pm_qos_remove_request(&dev_priv->pm_qos); + + if (dev_priv->slab) + kmem_cache_destroy(dev_priv->slab); + + pci_dev_put(dev_priv->bridge_dev); + kfree(dev->dev_private); + + return 0; +} + +int i915_driver_open(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv; + + DRM_DEBUG_DRIVER("\n"); + file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) + return -ENOMEM; + + file->driver_priv = file_priv; + + spin_lock_init(&file_priv->mm.lock); + INIT_LIST_HEAD(&file_priv->mm.request_list); + + idr_init(&file_priv->context_idr); + + return 0; +} + +/** + * i915_driver_lastclose - clean up after all DRM clients have exited + * @dev: DRM device + * + * Take care of cleaning up after all DRM clients have exited. In the + * mode setting case, we want to restore the kernel's initial mode (just + * in case the last client left us in a bad state). + * + * Additionally, in the non-mode setting case, we'll tear down the GTT + * and DMA structures, since the kernel won't be using them, and clean + * up any GEM state. + */ +void i915_driver_lastclose(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* On gen6+ we refuse to init without kms enabled, but then the drm core + * goes right around and calls lastclose. Check for this and don't clean + * up anything.
*/ + if (!dev_priv) + return; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_fb_restore_mode(dev); + vga_switcheroo_process_delayed_switch(); + return; + } + + i915_gem_lastclose(dev); + + i915_dma_cleanup(dev); +} + +void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +{ + i915_gem_context_close(dev, file_priv); + i915_gem_release(dev, file_priv); +} + +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + kfree(file_priv); +} + +struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), + 
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), +}; + +int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); + +/* + * This is really ugly: Because old userspace abused the linux agp interface to + * manage the gtt, we need to claim that all intel devices are agp. For + * otherwise the drm core refuses to initialize the agp support code. + */ +int i915_driver_device_is_agp(struct drm_device * dev) +{ + return 1; +} diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c new file mode 100644 index 0000000..8da4767 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -0,0 +1,1344 @@ +/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- + */ +/* + * + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +#include +#include +#include + +static int i915_modeset __read_mostly = -1; +module_param_named(modeset, i915_modeset, int, 0400); +MODULE_PARM_DESC(modeset, + "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " + "1=on, -1=force vga console preference [default])"); + +unsigned int i915_fbpercrtc __always_unused = 0; +module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); + +int i915_panel_ignore_lid __read_mostly = 1; +module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); +MODULE_PARM_DESC(panel_ignore_lid, + "Override lid status (0=autodetect, 1=autodetect disabled [default], " + "-1=force lid closed, -2=force lid open)"); + +unsigned int i915_powersave __read_mostly = 1; +module_param_named(powersave, i915_powersave, int, 0600); +MODULE_PARM_DESC(powersave, + "Enable powersavings, fbc, downclocking, etc. (default: true)"); + +int i915_semaphores __read_mostly = -1; +module_param_named(semaphores, i915_semaphores, int, 0600); +MODULE_PARM_DESC(semaphores, + "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); + +int i915_enable_rc6 __read_mostly = -1; +module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); +MODULE_PARM_DESC(i915_enable_rc6, + "Enable power-saving render C-state 6. " + "Different stages can be selected via bitmask values " + "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " + "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " + "default: -1 (use per-chip default)"); + +int i915_enable_fbc __read_mostly = -1; +module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); +MODULE_PARM_DESC(i915_enable_fbc, + "Enable frame buffer compression for power savings " + "(default: -1 (use per-chip default))"); + +unsigned int i915_lvds_downclock __read_mostly = 0; +module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); +MODULE_PARM_DESC(lvds_downclock, + "Use panel (LVDS/eDP) downclocking for power savings " + "(default: false)"); + +int i915_lvds_channel_mode __read_mostly; +module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600); +MODULE_PARM_DESC(lvds_channel_mode, + "Specify LVDS channel mode " + "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); + +int i915_panel_use_ssc __read_mostly = -1; +module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); +MODULE_PARM_DESC(lvds_use_ssc, + "Use Spread Spectrum Clock with panels [LVDS/eDP] " + "(default: auto from VBT)"); + +int i915_vbt_sdvo_panel_type __read_mostly = -1; +module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); +MODULE_PARM_DESC(vbt_sdvo_panel_type, + "Override/Ignore selection of SDVO panel mode in the VBT " + "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); + +static bool i915_try_reset __read_mostly = true; +module_param_named(reset, i915_try_reset, bool, 0600); +MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); + +bool i915_enable_hangcheck __read_mostly = true; +module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); +MODULE_PARM_DESC(enable_hangcheck, + "Periodically check GPU activity for detecting hangs. " + "WARNING: Disabling this can cause system wide hangs. 
" + "(default: true)"); + +int i915_enable_ppgtt __read_mostly = -1; +module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); +MODULE_PARM_DESC(i915_enable_ppgtt, + "Enable PPGTT (default: true)"); + +unsigned int i915_preliminary_hw_support __read_mostly = 0; +module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); +MODULE_PARM_DESC(preliminary_hw_support, + "Enable preliminary hardware support. (default: false)"); + +int i915_disable_power_well __read_mostly = 0; +module_param_named(disable_power_well, i915_disable_power_well, int, 0600); +MODULE_PARM_DESC(disable_power_well, + "Disable the power well when possible (default: false)"); + +static struct drm_driver driver; +extern int intel_agp_enabled; + +#define INTEL_VGA_DEVICE(id, info) { \ + .class = PCI_BASE_CLASS_DISPLAY << 16, \ + .class_mask = 0xff0000, \ + .vendor = 0x8086, \ + .device = id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long) info } + +#define INTEL_QUANTA_VGA_DEVICE(info) { \ + .class = PCI_BASE_CLASS_DISPLAY << 16, \ + .class_mask = 0xff0000, \ + .vendor = 0x8086, \ + .device = 0x16a, \ + .subvendor = 0x152d, \ + .subdevice = 0x8990, \ + .driver_data = (unsigned long) info } + + +static const struct intel_device_info intel_i830_info = { + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, +}; + +static const struct intel_device_info intel_845g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, +}; + +static const struct intel_device_info intel_i85x_info = { + .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, +}; + +static const struct intel_device_info intel_i865g_info = { + .gen = 2, .num_pipes = 1, + .has_overlay = 1, .overlay_needs_physical = 1, +}; + +static const struct intel_device_info intel_i915g_info = { + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, +}; +static const struct intel_device_info intel_i915gm_info = { + .gen = 3, .is_mobile = 1, .num_pipes = 2, + .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, +}; +static const struct intel_device_info intel_i945g_info = { + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, + .has_overlay = 1, .overlay_needs_physical = 1, +}; +static const struct intel_device_info intel_i945gm_info = { + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, + .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, +}; + +static const struct intel_device_info intel_i965g_info = { + .gen = 4, .is_broadwater = 1, .num_pipes = 2, + .has_hotplug = 1, + .has_overlay = 1, +}; + +static const struct intel_device_info intel_i965gm_info = { + .gen = 4, .is_crestline = 1, .num_pipes = 2, + .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, + .has_overlay = 1, + .supports_tv = 1, +}; + +static const struct intel_device_info intel_g33_info = { + .gen = 3, .is_g33 = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, +}; + +static const struct intel_device_info intel_g45_info = { + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .has_bsd_ring = 1, +}; + +static const struct intel_device_info intel_gm45_info = { + .gen = 4, .is_g4x = 1, .num_pipes 
= 2, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, + .has_pipe_cxsr = 1, .has_hotplug = 1, + .supports_tv = 1, + .has_bsd_ring = 1, +}; + +static const struct intel_device_info intel_pineview_info = { + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, +}; + +static const struct intel_device_info intel_ironlake_d_info = { + .gen = 5, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, +}; + +static const struct intel_device_info intel_ironlake_m_info = { + .gen = 5, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .has_bsd_ring = 1, +}; + +static const struct intel_device_info intel_sandybridge_d_info = { + .gen = 6, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_force_wake = 1, +}; + +static const struct intel_device_info intel_sandybridge_m_info = { + .gen = 6, .is_mobile = 1, .num_pipes = 2, + .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, + .has_llc = 1, + .has_force_wake = 1, +}; + +#define GEN7_FEATURES \ + .gen = 7, .num_pipes = 3, \ + .need_gfx_hws = 1, .has_hotplug = 1, \ + .has_bsd_ring = 1, \ + .has_blt_ring = 1, \ + .has_llc = 1, \ + .has_force_wake = 1 + +static const struct intel_device_info intel_ivybridge_d_info = { + GEN7_FEATURES, + .is_ivybridge = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .is_mobile = 1, +}; + +static const struct intel_device_info intel_ivybridge_q_info = { + GEN7_FEATURES, + .is_ivybridge = 1, + .num_pipes = 0, /* legal, last one wins */ +}; + +static const struct intel_device_info intel_valleyview_m_info = { + GEN7_FEATURES, + .is_mobile = 1, + .num_pipes = 2, + .is_valleyview = 1, + .display_mmio_offset = VLV_DISPLAY_BASE, + .has_llc = 0, /* legal, last one wins */ +}; + +static const struct intel_device_info intel_valleyview_d_info = { + GEN7_FEATURES, + .num_pipes = 2, + .is_valleyview = 1, + .display_mmio_offset = VLV_DISPLAY_BASE, + .has_llc = 0, /* legal, last one wins */ +}; + +static const struct intel_device_info intel_haswell_d_info = { + GEN7_FEATURES, + .is_haswell = 1, +}; + +static const struct intel_device_info intel_haswell_m_info = { + GEN7_FEATURES, + .is_haswell = 1, + .is_mobile = 1, +}; + +static const struct pci_device_id pciidlist[] = { /* aka */ + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ + INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ + INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ + INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ + INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ + INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ + INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ + INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ + INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ + INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ + INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ + INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ + INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ + 
INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ + INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ + INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ + INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ + INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ + INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ + INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ + INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ + INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ + INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), + INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), + INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), + INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), + INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ + INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ + INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ + INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */ + INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ + INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ + INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ + INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ + INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ + INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ + INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ + INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ + INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ + INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ + INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ + INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ + INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ + INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ + INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ + INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ + INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ + INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ + INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ + INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ + INTEL_VGA_DEVICE(0x0C0E, 
&intel_haswell_d_info), /* SDV GT1 reserved */ + INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ + INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ + INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ + INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ + INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ + INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ + INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ + INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ + INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ + INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ + INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ + INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ + INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ + INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ + INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ + INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ + INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ + INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ + INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ + INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ + INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ + INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ + INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ + INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), + {0, 0, 0} +}; + +#if defined(CPTCFG_DRM_I915_KMS) +MODULE_DEVICE_TABLE(pci, pciidlist); +#endif + +void intel_detect_pch(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pch; + + /* In all current cases, num_pipes is equivalent to the PCH_NOP setting + * (which really amounts to a PCH but no South Display). + */ + if (INTEL_INFO(dev)->num_pipes == 0) { + dev_priv->pch_type = PCH_NOP; + dev_priv->num_pch_pll = 0; + return; + } + + /* + * The reason to probe ISA bridge instead of Dev31:Fun0 is to + * make graphics device passthrough work easy for VMM, that only + * need to expose ISA bridge to let driver know the real hardware + * underneath. This is a requirement from virtualization team. 
*/ + pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); + if (pch) { + if (pch->vendor == PCI_VENDOR_ID_INTEL) { + unsigned short id; + id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + dev_priv->pch_id = id; + + if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_IBX; + dev_priv->num_pch_pll = 2; + DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); + WARN_ON(!IS_GEN5(dev)); + } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_CPT; + dev_priv->num_pch_pll = 2; + DRM_DEBUG_KMS("Found CougarPoint PCH\n"); + WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); + } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { + /* PantherPoint is CPT compatible */ + dev_priv->pch_type = PCH_CPT; + dev_priv->num_pch_pll = 2; + DRM_DEBUG_KMS("Found PantherPoint PCH\n"); + WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); + } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_LPT; + dev_priv->num_pch_pll = 0; + DRM_DEBUG_KMS("Found LynxPoint PCH\n"); + WARN_ON(!IS_HASWELL(dev)); + WARN_ON(IS_ULT(dev)); + } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_LPT; + dev_priv->num_pch_pll = 0; + DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); + WARN_ON(!IS_HASWELL(dev)); + WARN_ON(!IS_ULT(dev)); + } + BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); + } + pci_dev_put(pch); + } +} + +bool i915_semaphore_is_enabled(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + return 0; + + if (i915_semaphores >= 0) + return i915_semaphores; + +#ifdef CONFIG_INTEL_IOMMU + /* Enable semaphores on SNB when IO remapping is off */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + return false; +#endif + + return 1; +} + +static int i915_drm_freeze(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + + /* ignore lid events during suspend */ + mutex_lock(&dev_priv->modeset_restore_lock); + dev_priv->modeset_restore = MODESET_SUSPENDED; + mutex_unlock(&dev_priv->modeset_restore_lock); + + intel_set_power_well(dev, true); + + drm_kms_helper_poll_disable(dev); + + pci_save_state(dev->pdev); + + /* If KMS is active, we do the leavevt stuff here */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + int error = i915_gem_idle(dev); + if (error) { + dev_err(&dev->pdev->dev, + "GEM idle failed, resume might fail\n"); + return error; + } + + cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); + + drm_irq_uninstall(dev); + dev_priv->enable_hotplug_processing = false; + /* + * Disable CRTCs directly since we want to preserve sw state + * for _thaw. 
*/ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + dev_priv->display.crtc_disable(crtc); + } + + i915_save_state(dev); + + intel_opregion_fini(dev); + + console_lock(); + intel_fbdev_set_suspend(dev, 1); + console_unlock(); + + return 0; +} + +int i915_suspend(struct drm_device *dev, pm_message_t state) +{ + int error; + + if (!dev || !dev->dev_private) { + DRM_ERROR("dev: %p\n", dev); + DRM_ERROR("DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + if (state.event == PM_EVENT_PRETHAW) + return 0; + + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + error = i915_drm_freeze(dev); + if (error) + return error; + + if (state.event == PM_EVENT_SUSPEND) { + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3hot); + } + + return 0; +} + +void intel_console_resume(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, + console_resume_work); + struct drm_device *dev = dev_priv->dev; + + console_lock(); + intel_fbdev_set_suspend(dev, 0); + console_unlock(); +} + +static void intel_resume_hotplug(struct drm_device *dev) +{ + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + + mutex_lock(&mode_config->mutex); + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->hot_plug) + encoder->hot_plug(encoder); + + mutex_unlock(&mode_config->mutex); + + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(dev); +} + +static int __i915_drm_thaw(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int error = 0; + + i915_restore_state(dev); + intel_opregion_setup(dev); + + /* KMS EnterVT equivalent */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_init_pch_refclk(dev); + + mutex_lock(&dev->struct_mutex); + dev_priv->mm.suspended = 0; + + error = i915_gem_init_hw(dev); + mutex_unlock(&dev->struct_mutex); + + /* We need working interrupts for modeset enabling ... */ + drm_irq_install(dev); + + intel_modeset_init_hw(dev); + + drm_modeset_lock_all(dev); + intel_modeset_setup_hw_state(dev, true); + drm_modeset_unlock_all(dev); + + /* + * ... but also need to make sure that hotplug processing + * doesn't cause havoc. Like in the driver load code we don't + * bother with the tiny race here where we might lose hotplug + * notifications. + * */ + intel_hpd_init(dev); + dev_priv->enable_hotplug_processing = true; + /* Config may have changed between suspend and resume */ + intel_resume_hotplug(dev); + } + + intel_opregion_init(dev); + + /* + * The console lock can be pretty contended on resume due + * to all the printk activity. Try to keep it out of the hot + * path of resume if possible. 
+ */ + if (console_trylock()) { + intel_fbdev_set_suspend(dev, 0); + console_unlock(); + } else { + schedule_work(&dev_priv->console_resume_work); + } + + mutex_lock(&dev_priv->modeset_restore_lock); + dev_priv->modeset_restore = MODESET_DONE; + mutex_unlock(&dev_priv->modeset_restore_lock); + return error; +} + +static int i915_drm_thaw(struct drm_device *dev) +{ + int error = 0; + + intel_gt_sanitize(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + + __i915_drm_thaw(dev); + + return error; +} + +int i915_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + if (pci_enable_device(dev->pdev)) + return -EIO; + + pci_set_master(dev->pdev); + + intel_gt_sanitize(dev); + + /* + * Platforms with opregion should have sane BIOS, older ones (gen3 and + * earlier) need this since the BIOS might clear all our scratch PTEs. + */ + if (drm_core_check_feature(dev, DRIVER_MODESET) && + !dev_priv->opregion.header) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + + ret = __i915_drm_thaw(dev); + if (ret) + return ret; + + drm_kms_helper_poll_enable(dev); + return 0; +} + +static int i8xx_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_I85X(dev)) + return -ENODEV; + + I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + if (IS_I830(dev) || IS_845G(dev)) { + I915_WRITE(DEBUG_RESET_I830, + DEBUG_RESET_DISPLAY | + DEBUG_RESET_RENDER | + DEBUG_RESET_FULL); + POSTING_READ(DEBUG_RESET_I830); + msleep(1); + + I915_WRITE(DEBUG_RESET_I830, 0); + POSTING_READ(DEBUG_RESET_I830); + } + + msleep(1); + + I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + return 0; +} + +static int i965_reset_complete(struct drm_device *dev) +{ + u8 gdrst; + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + return (gdrst & GRDOM_RESET_ENABLE) == 0; +} + +static int i965_do_reset(struct drm_device *dev) +{ + int ret; + u8 gdrst; + + /* + * Set the domains we want to reset (GRDOM/bits 2 and 3) as + * well as the reset bit (GR/bit 0). Setting the GR bit + * triggers the reset; when done, the hardware will clear it. + */ + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + pci_write_config_byte(dev->pdev, I965_GDRST, + gdrst | GRDOM_RENDER | + GRDOM_RESET_ENABLE); + ret = wait_for(i965_reset_complete(dev), 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... */ + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + pci_write_config_byte(dev->pdev, I965_GDRST, + gdrst | GRDOM_MEDIA | + GRDOM_RESET_ENABLE); + + return wait_for(i965_reset_complete(dev), 500); +} + +static int ironlake_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 gdrst; + int ret; + + gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + gdrst &= ~GRDOM_MASK; + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, + gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); + ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); + if (ret) + return ret; + + /* We can't reset render&media without also resetting display ... 
*/ + gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + gdrst &= ~GRDOM_MASK; + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, + gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); + return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); +} + +static int gen6_do_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + unsigned long irqflags; + + /* Hold gt_lock across reset to prevent any register access + * with forcewake not set correctly + */ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + + /* Reset the chip */ + + /* GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); + + /* Spin waiting for the device to ack the reset request */ + ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + + /* If reset with a user forcewake, try to restore, otherwise turn it off */ + if (dev_priv->forcewake_count) + dev_priv->gt.force_wake_get(dev_priv); + else + dev_priv->gt.force_wake_put(dev_priv); + + /* Restore fifo count */ + dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); + return ret; +} + +int intel_gpu_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = -ENODEV; + + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + ret = gen6_do_reset(dev); + break; + case 5: + ret = ironlake_do_reset(dev); + break; + case 4: + ret = i965_do_reset(dev); + break; + case 2: + ret = i8xx_do_reset(dev); + break; + } + + /* Also reset the gpu hangman. */ + if (dev_priv->gpu_error.stop_rings) { + DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); + dev_priv->gpu_error.stop_rings = 0; + if (ret == -ENODEV) { + DRM_ERROR("Reset not implemented, but ignoring " + "error for simulated gpu hangs\n"); + ret = 0; + } + } + + return ret; +} + +/** + * i915_reset - reset chip after a hang + * @dev: drm device to reset + * + * Reset the chip. Useful if a hang is detected. Returns zero on successful + * reset or otherwise an error code. + * + * Procedure is fairly simple: + * - reset the chip using the reset reg + * - re-init context state + * - re-init hardware status page + * - re-init ring buffer + * - re-init interrupt state + * - re-init display + */ +int i915_reset(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (!i915_try_reset) + return 0; + + mutex_lock(&dev->struct_mutex); + + i915_gem_reset(dev); + + ret = -ENODEV; + if (get_seconds() - dev_priv->gpu_error.last_reset < 5) + DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); + else + ret = intel_gpu_reset(dev); + + dev_priv->gpu_error.last_reset = get_seconds(); + if (ret) { + DRM_ERROR("Failed to reset chip.\n"); + mutex_unlock(&dev->struct_mutex); + return ret; + } + + /* Ok, now get things going again... */ + + /* + * Everything depends on having the GTT running, so we need to start + * there. Fortunately we don't need to do this unless we reset the + * chip at a PCI level. + * + * Next we need to restore the context, but we don't use those + * yet either... + * + * Ring buffer needs to be re-initialized in the KMS case, or if X + * was running at the time of the reset (i.e. we weren't VT + * switched away). 
+ */ + if (drm_core_check_feature(dev, DRIVER_MODESET) || + !dev_priv->mm.suspended) { + struct intel_ring_buffer *ring; + int i; + + dev_priv->mm.suspended = 0; + + i915_gem_init_swizzling(dev); + + for_each_ring(ring, dev_priv, i) + ring->init(ring); + + i915_gem_context_init(dev); + if (dev_priv->mm.aliasing_ppgtt) { + ret = dev_priv->mm.aliasing_ppgtt->enable(dev); + if (ret) + i915_gem_cleanup_aliasing_ppgtt(dev); + } + + /* + * It would make sense to re-init all the other hw state, at + * least the rps/rc6/emon init done within modeset_init_hw. For + * some unknown reason, this blows up my ilk, so don't. + */ + + mutex_unlock(&dev->struct_mutex); + + drm_irq_uninstall(dev); + drm_irq_install(dev); + intel_hpd_init(dev); + } else { + mutex_unlock(&dev->struct_mutex); + } + + return 0; +} + +static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct intel_device_info *intel_info = + (struct intel_device_info *) ent->driver_data; + + if (intel_info->is_valleyview) + if(!i915_preliminary_hw_support) { + DRM_ERROR("Preliminary hardware support disabled\n"); + return -ENODEV; + } + + /* Only bind to function 0 of the device. Early generations + * used function 1 as a placeholder for multi-head. This causes + * us confusion instead, especially on the systems where both + * functions have the same PCI-ID! + */ + if (PCI_FUNC(pdev->devfn)) + return -ENODEV; + + /* We've managed to ship a kms-enabled ddx that shipped with an XvMC + * implementation for gen3 (and only gen3) that used legacy drm maps + * (gasp!) to share buffers between X and the client. Hence we need to + * keep around the fake agp stuff for gen3, even when kms is enabled. */ + if (intel_info->gen != 3) { + driver.driver_features &= + ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); + } else if (!intel_agp_enabled) { + DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); + return -ENODEV; + } + + return drm_get_pci_dev(pdev, ent, &driver); +} + +static void +i915_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +static int i915_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; + + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + error = i915_drm_freeze(drm_dev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int i915_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_resume(drm_dev); +} + +static int i915_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + if (!drm_dev || !drm_dev->dev_private) { + dev_err(dev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + return i915_drm_freeze(drm_dev); +} + +static int i915_pm_thaw(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_thaw(drm_dev); +} + +static int i915_pm_poweroff(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_freeze(drm_dev); +} + +static const struct dev_pm_ops i915_pm_ops = { + .suspend 
= i915_pm_suspend, + .resume = i915_pm_resume, + .freeze = i915_pm_freeze, + .thaw = i915_pm_thaw, + .poweroff = i915_pm_poweroff, + .restore = i915_pm_resume, +}; + +static const struct vm_operations_struct i915_gem_vm_ops = { + .fault = i915_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct file_operations i915_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = drm_gem_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + .read = drm_read, +#ifdef CONFIG_COMPAT + .compat_ioctl = i915_compat_ioctl, +#endif + .llseek = noop_llseek, +}; + +static struct drm_driver driver = { + /* Don't use MTRRs here; the Xserver or userspace app should + * deal with them for Intel hardware. + */ + .driver_features = + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, + .load = i915_driver_load, + .unload = i915_driver_unload, + .open = i915_driver_open, + .lastclose = i915_driver_lastclose, + .preclose = i915_driver_preclose, + .postclose = i915_driver_postclose, + + /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ + .suspend = i915_suspend, + .resume = i915_resume, + + .device_is_agp = i915_driver_device_is_agp, + .master_create = i915_master_create, + .master_destroy = i915_master_destroy, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = i915_debugfs_init, + .debugfs_cleanup = i915_debugfs_cleanup, +#endif + .gem_init_object = i915_gem_init_object, + .gem_free_object = i915_gem_free_object, + .gem_vm_ops = &i915_gem_vm_ops, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = i915_gem_prime_export, + .gem_prime_import = i915_gem_prime_import, + + .dumb_create = i915_gem_dumb_create, + .dumb_map_offset = i915_gem_mmap_gtt, + .dumb_destroy = i915_gem_dumb_destroy, + .ioctls = i915_ioctls, + .fops = &i915_driver_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static struct pci_driver i915_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = i915_pci_probe, + .remove = i915_pci_remove, + .driver.pm = &i915_pm_ops, +}; + +static int __init i915_init(void) +{ + driver.num_ioctls = i915_max_ioctl; + + /* + * If CPTCFG_DRM_I915_KMS is set, default to KMS unless + * explicitly disabled with the module parameter. + * + * Otherwise, just follow the parameter (defaulting to off). + * + * Allow optional vga_text_mode_force boot option to override + * the default behavior. 
+ */ +#if defined(CPTCFG_DRM_I915_KMS) + if (i915_modeset != 0) + driver.driver_features |= DRIVER_MODESET; +#endif + if (i915_modeset == 1) + driver.driver_features |= DRIVER_MODESET; + +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && i915_modeset == -1) + driver.driver_features &= ~DRIVER_MODESET; +#endif + + if (!(driver.driver_features & DRIVER_MODESET)) + driver.get_vblank_timestamp = NULL; + + return drm_pci_init(&driver, &i915_pci_driver); +} + +static void __exit i915_exit(void) +{ + drm_pci_exit(&driver, &i915_pci_driver); +} + +module_init(i915_init); +module_exit(i915_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); + +/* We give fast paths for the really cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) +static void +ilk_dummy_write(struct drm_i915_private *dev_priv) +{ + /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the + * chip from rc6 before touching it for real. MI_MODE is masked, hence + * harmless to write 0 into. */ + I915_WRITE_NOTRACE(MI_MODE, 0); +} + +static void +hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) +{ + if (IS_HASWELL(dev_priv->dev) && + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + DRM_ERROR("Unknown unclaimed register before writing to %x\n", + reg); + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + } +} + +static void +hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) +{ + if (IS_HASWELL(dev_priv->dev) && + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + DRM_ERROR("Unclaimed write to %x\n", reg); + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + } +} + +#define __i915_read(x, y) \ +u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ + unsigned long irqflags; \ + u##x val = 0; \ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ + if (IS_GEN5(dev_priv->dev)) \ + ilk_dummy_write(dev_priv); \ + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + if (dev_priv->forcewake_count == 0) \ + dev_priv->gt.force_wake_get(dev_priv); \ + val = read##y(dev_priv->regs + reg); \ + if (dev_priv->forcewake_count == 0) \ + dev_priv->gt.force_wake_put(dev_priv); \ + } else { \ + val = read##y(dev_priv->regs + reg); \ + } \ + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ + trace_i915_reg_rw(false, reg, val, sizeof(val)); \ + return val; \ +} + +__i915_read(8, b) +__i915_read(16, w) +__i915_read(32, l) +__i915_read(64, q) +#undef __i915_read + +#define __i915_write(x, y) \ +void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ + unsigned long irqflags; \ + u32 __fifo_ret = 0; \ + trace_i915_reg_rw(true, reg, val, sizeof(val)); \ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ + } \ + if (IS_GEN5(dev_priv->dev)) \ + ilk_dummy_write(dev_priv); \ + hsw_unclaimed_reg_clear(dev_priv, reg); \ + write##y(val, dev_priv->regs + reg); \ + if (unlikely(__fifo_ret)) { \ + gen6_gt_check_fifodbg(dev_priv); \ + } \ + hsw_unclaimed_reg_check(dev_priv, reg); \ + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ +} +__i915_write(8, b) +__i915_write(16, w) +__i915_write(32, l) +__i915_write(64, q) +#undef __i915_write + +static const struct register_whitelist { + uint64_t offset; + uint32_t size; + uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ +} whitelist[] = { + { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, +}; + +int i915_reg_read_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reg_read *reg = data; + struct register_whitelist const *entry = whitelist; + int i; + + for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { + if (entry->offset == reg->offset && + (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + break; + } + + if (i == ARRAY_SIZE(whitelist)) + return -EINVAL; + + switch (entry->size) { + case 8: + reg->val = I915_READ64(reg->offset); + break; + case 4: + reg->val = I915_READ(reg->offset); + break; + case 2: + reg->val = I915_READ16(reg->offset); + break; + case 1: + reg->val = I915_READ8(reg->offset); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h new file mode 100644 index 0000000..47d8b68 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -0,0 +1,1965 @@ +/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- + */ +/* + * + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _I915_DRV_H_ +#define _I915_DRV_H_ + +#include + +#include "i915_reg.h" +#include "intel_bios.h" +#include "intel_ringbuffer.h" +#include +#include +#include +#include +#include +#include +#include +#include + +/* General customization: + */ + +#define DRIVER_AUTHOR "Tungsten Graphics, Inc." 
+ +#define DRIVER_NAME "i915" +#define DRIVER_DESC "Intel Graphics" +#define DRIVER_DATE "20080730" + +enum pipe { + PIPE_A = 0, + PIPE_B, + PIPE_C, + I915_MAX_PIPES +}; +#define pipe_name(p) ((p) + 'A') + +enum transcoder { + TRANSCODER_A = 0, + TRANSCODER_B, + TRANSCODER_C, + TRANSCODER_EDP = 0xF, +}; +#define transcoder_name(t) ((t) + 'A') + +enum plane { + PLANE_A = 0, + PLANE_B, + PLANE_C, +}; +#define plane_name(p) ((p) + 'A') + +enum port { + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + I915_MAX_PORTS +}; +#define port_name(p) ((p) + 'A') + +enum hpd_pin { + HPD_NONE = 0, + HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ + HPD_TV = HPD_NONE, /* TV is known to be unreliable */ + HPD_CRT, + HPD_SDVO_B, + HPD_SDVO_C, + HPD_PORT_B, + HPD_PORT_C, + HPD_PORT_D, + HPD_NUM_PINS +}; + +#define I915_GEM_GPU_DOMAINS \ + (I915_GEM_DOMAIN_RENDER | \ + I915_GEM_DOMAIN_SAMPLER | \ + I915_GEM_DOMAIN_COMMAND | \ + I915_GEM_DOMAIN_INSTRUCTION | \ + I915_GEM_DOMAIN_VERTEX) + +#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) + +#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ + list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ + if ((intel_encoder)->base.crtc == (__crtc)) + +struct intel_pch_pll { + int refcount; /* count of number of CRTCs sharing this PLL */ + int active; /* count of number of active CRTCs (i.e. DPMS on) */ + bool on; /* is the PLL actually active? Disabled during modeset */ + int pll_reg; + int fp0_reg; + int fp1_reg; +}; +#define I915_NUM_PLLS 2 + +/* Used by dp and fdi links */ +struct intel_link_m_n { + uint32_t tu; + uint32_t gmch_m; + uint32_t gmch_n; + uint32_t link_m; + uint32_t link_n; +}; + +void intel_link_compute_m_n(int bpp, int nlanes, + int pixel_clock, int link_clock, + struct intel_link_m_n *m_n); + +struct intel_ddi_plls { + int spll_refcount; + int wrpll1_refcount; + int wrpll2_refcount; +}; + +/* Interface history: + * + * 1.1: Original. 
+ * 1.2: Add Power Management + * 1.3: Add vblank support + * 1.4: Fix cmdbuffer path, add heap destroy + * 1.5: Add vblank pipe configuration + * 1.6: - New ioctl for scheduling buffer swaps on vertical blank + * - Support vertical blank on secondary display pipe + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 6 +#define DRIVER_PATCHLEVEL 0 + +#define WATCH_COHERENCY 0 +#define WATCH_LISTS 0 +#define WATCH_GTT 0 + +#define I915_GEM_PHYS_CURSOR_0 1 +#define I915_GEM_PHYS_CURSOR_1 2 +#define I915_GEM_PHYS_OVERLAY_REGS 3 +#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) + +struct drm_i915_gem_phys_object { + int id; + struct page **page_list; + drm_dma_handle_t *handle; + struct drm_i915_gem_object *cur_obj; +}; + +struct opregion_header; +struct opregion_acpi; +struct opregion_swsci; +struct opregion_asle; +struct drm_i915_private; + +struct intel_opregion { + struct opregion_header __iomem *header; + struct opregion_acpi __iomem *acpi; + struct opregion_swsci __iomem *swsci; + struct opregion_asle __iomem *asle; + void __iomem *vbt; + u32 __iomem *lid_state; +}; +#define OPREGION_SIZE (8*1024) + +struct intel_overlay; +struct intel_overlay_error_state; + +struct drm_i915_master_private { + drm_local_map_t *sarea; + struct _drm_i915_sarea *sarea_priv; +}; +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 32 +/* 32 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 6 + +struct drm_i915_fence_reg { + struct list_head lru_list; + struct drm_i915_gem_object *obj; + int pin_count; +}; + +struct sdvo_device_mapping { + u8 initialized; + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 i2c_pin; + u8 ddc_pin; +}; + +struct intel_display_error_state; + +struct drm_i915_error_state { + struct kref ref; + u32 eir; + u32 pgtbl_er; + u32 ier; + u32 ccid; + u32 derrmr; + u32 forcewake; + bool waiting[I915_NUM_RINGS]; + u32 pipestat[I915_MAX_PIPES]; + u32 tail[I915_NUM_RINGS]; + u32 head[I915_NUM_RINGS]; + u32 ctl[I915_NUM_RINGS]; + u32 ipeir[I915_NUM_RINGS]; + u32 ipehr[I915_NUM_RINGS]; + u32 instdone[I915_NUM_RINGS]; + u32 acthd[I915_NUM_RINGS]; + u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; + u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; + u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ + /* our own tracking of ring head and tail */ + u32 cpu_ring_head[I915_NUM_RINGS]; + u32 cpu_ring_tail[I915_NUM_RINGS]; + u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ + u32 instpm[I915_NUM_RINGS]; + u32 instps[I915_NUM_RINGS]; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u32 seqno[I915_NUM_RINGS]; + u64 bbaddr; + u32 fault_reg[I915_NUM_RINGS]; + u32 done_reg; + u32 faddr[I915_NUM_RINGS]; + u64 fence[I915_MAX_NUM_FENCES]; + struct timeval time; + struct drm_i915_error_ring { + struct drm_i915_error_object { + int page_count; + u32 gtt_offset; + u32 *pages[0]; + } *ringbuffer, *batchbuffer, *ctx; + struct drm_i915_error_request { + long jiffies; + u32 seqno; + u32 tail; + } *requests; + int num_requests; + } ring[I915_NUM_RINGS]; + struct drm_i915_error_buffer { + u32 size; + u32 name; + u32 rseqno, wseqno; + u32 gtt_offset; + u32 read_domains; + u32 write_domain; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; + s32 pinned:2; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + s32 ring:4; + u32 cache_level:2; + } *active_bo, *pinned_bo; + u32 active_bo_count, pinned_bo_count; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; +}; + +struct intel_crtc_config; +struct intel_crtc; + +struct drm_i915_display_funcs { 
+ bool (*fbc_enabled)(struct drm_device *dev); + void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); + void (*disable_fbc)(struct drm_device *dev); + int (*get_display_clock_speed)(struct drm_device *dev); + int (*get_fifo_size)(struct drm_device *dev, int plane); + void (*update_wm)(struct drm_device *dev); + void (*update_sprite_wm)(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size); + void (*update_linetime_wm)(struct drm_device *dev, int pipe, + struct drm_display_mode *mode); + void (*modeset_global_resources)(struct drm_device *dev); + /* Returns the active state of the crtc, and if the crtc is active, + * fills out the pipe-config with the hw state. */ + bool (*get_pipe_config)(struct intel_crtc *, + struct intel_crtc_config *); + int (*crtc_mode_set)(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *old_fb); + void (*crtc_enable)(struct drm_crtc *crtc); + void (*crtc_disable)(struct drm_crtc *crtc); + void (*off)(struct drm_crtc *crtc); + void (*write_eld)(struct drm_connector *connector, + struct drm_crtc *crtc); + void (*fdi_link_train)(struct drm_crtc *crtc); + void (*init_clock_gating)(struct drm_device *dev); + int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj); + int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y); + void (*hpd_irq_setup)(struct drm_device *dev); + /* clock updates for mode set */ + /* cursor updates */ + /* render clock increase/decrease */ + /* display clock increase/decrease */ + /* pll clock increase/decrease */ +}; + +struct drm_i915_gt_funcs { + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); +}; + +#define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ + DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ + DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ + DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_llc) + +struct intel_device_info { + u32 display_mmio_offset; + u8 num_pipes:3; + u8 gen; + u8 is_mobile:1; + u8 is_i85x:1; + u8 is_i915g:1; + u8 is_i945gm:1; + u8 is_g33:1; + u8 need_gfx_hws:1; + u8 is_g4x:1; + u8 is_pineview:1; + u8 is_broadwater:1; + u8 is_crestline:1; + u8 is_ivybridge:1; + u8 is_valleyview:1; + u8 has_force_wake:1; + u8 is_haswell:1; + u8 has_fbc:1; + u8 has_pipe_cxsr:1; + u8 has_hotplug:1; + u8 cursor_needs_physical:1; + u8 has_overlay:1; + u8 overlay_needs_physical:1; + u8 supports_tv:1; + u8 has_bsd_ring:1; + u8 has_blt_ring:1; + u8 has_llc:1; +}; + +enum i915_cache_level { + I915_CACHE_NONE = 0, + I915_CACHE_LLC, + I915_CACHE_LLC_MLC, /* gen6+, in docs 
at least! */ +}; + +/* The Graphics Translation Table is the way in which GEN hardware translates a + * Graphics Virtual Address into a Physical Address. In addition to the normal + * collateral associated with any va->pa translations GEN hardware also has a + * portion of the GTT which can be mapped by the CPU and remain both coherent + * and correct (in cases like swizzling). That region is referred to as GMADR in + * the spec. + */ +struct i915_gtt { + unsigned long start; /* Start offset of used GTT */ + size_t total; /* Total size GTT can map */ + size_t stolen_size; /* Total size of stolen memory */ + + unsigned long mappable_end; /* End offset that we can CPU map */ + struct io_mapping *mappable; /* Mapping to our CPU mappable region */ + phys_addr_t mappable_base; /* PA of our GMADR */ + + /** "Graphics Stolen Memory" holds the global PTEs */ + void __iomem *gsm; + + bool do_idle_maps; + dma_addr_t scratch_page_dma; + struct page *scratch_page; + + /* global gtt ops */ + int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, + size_t *stolen, phys_addr_t *mappable_base, + unsigned long *mappable_end); + void (*gtt_remove)(struct drm_device *dev); + void (*gtt_clear_range)(struct drm_device *dev, + unsigned int first_entry, + unsigned int num_entries); + void (*gtt_insert_entries)(struct drm_device *dev, + struct sg_table *st, + unsigned int pg_start, + enum i915_cache_level cache_level); +}; +#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) + +#define I915_PPGTT_PD_ENTRIES 512 +#define I915_PPGTT_PT_ENTRIES 1024 +struct i915_hw_ppgtt { + struct drm_device *dev; + unsigned num_pd_entries; + struct page **pt_pages; + uint32_t pd_offset; + dma_addr_t *pt_dma_addr; + dma_addr_t scratch_page_dma_addr; + + /* pte functions, mirroring the interface of the global gtt. */ + void (*clear_range)(struct i915_hw_ppgtt *ppgtt, + unsigned int first_entry, + unsigned int num_entries); + void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, + struct sg_table *st, + unsigned int pg_start, + enum i915_cache_level cache_level); + int (*enable)(struct drm_device *dev); + void (*cleanup)(struct i915_hw_ppgtt *ppgtt); +}; + + +/* This must match up with the value previously used for execbuf2.rsvd1. 
*/ +#define DEFAULT_CONTEXT_ID 0 +struct i915_hw_context { + int id; + bool is_initialized; + struct drm_i915_file_private *file_priv; + struct intel_ring_buffer *ring; + struct drm_i915_gem_object *obj; +}; + +enum no_fbc_reason { + FBC_NO_OUTPUT, /* no outputs enabled to compress */ + FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ + FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ + FBC_MODE_TOO_LARGE, /* mode too large for compression */ + FBC_BAD_PLANE, /* fbc not supported on plane */ + FBC_NOT_TILED, /* buffer not tiled */ + FBC_MULTIPLE_PIPES, /* more than one pipe active */ + FBC_MODULE_PARAM, +}; + +enum intel_pch { + PCH_NONE = 0, /* No PCH present */ + PCH_IBX, /* Ibexpeak PCH */ + PCH_CPT, /* Cougarpoint PCH */ + PCH_LPT, /* Lynxpoint PCH */ + PCH_NOP, +}; + +enum intel_sbi_destination { + SBI_ICLK, + SBI_MPHY, +}; + +#define QUIRK_PIPEA_FORCE (1<<0) +#define QUIRK_LVDS_SSC_DISABLE (1<<1) +#define QUIRK_INVERT_BRIGHTNESS (1<<2) +#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) + +struct intel_fbdev; +struct intel_fbc_work; + +struct intel_gmbus { + struct i2c_adapter adapter; + u32 force_bit; + u32 reg0; + u32 gpio_reg; + struct i2c_algo_bit_data bit_algo; + struct drm_i915_private *dev_priv; +}; + +struct i915_suspend_saved_registers { + u8 saveLBB; + u32 saveDSPACNTR; + u32 saveDSPBCNTR; + u32 saveDSPARB; + u32 savePIPEACONF; + u32 savePIPEBCONF; + u32 savePIPEASRC; + u32 savePIPEBSRC; + u32 saveFPA0; + u32 saveFPA1; + u32 saveDPLL_A; + u32 saveDPLL_A_MD; + u32 saveHTOTAL_A; + u32 saveHBLANK_A; + u32 saveHSYNC_A; + u32 saveVTOTAL_A; + u32 saveVBLANK_A; + u32 saveVSYNC_A; + u32 saveBCLRPAT_A; + u32 saveTRANSACONF; + u32 saveTRANS_HTOTAL_A; + u32 saveTRANS_HBLANK_A; + u32 saveTRANS_HSYNC_A; + u32 saveTRANS_VTOTAL_A; + u32 saveTRANS_VBLANK_A; + u32 saveTRANS_VSYNC_A; + u32 savePIPEASTAT; + u32 saveDSPASTRIDE; + u32 saveDSPASIZE; + u32 saveDSPAPOS; + u32 saveDSPAADDR; + u32 saveDSPASURF; + u32 saveDSPATILEOFF; + u32 savePFIT_PGM_RATIOS; + u32 saveBLC_HIST_CTL; + u32 saveBLC_PWM_CTL; + u32 saveBLC_PWM_CTL2; + u32 saveBLC_CPU_PWM_CTL; + u32 saveBLC_CPU_PWM_CTL2; + u32 saveFPB0; + u32 saveFPB1; + u32 saveDPLL_B; + u32 saveDPLL_B_MD; + u32 saveHTOTAL_B; + u32 saveHBLANK_B; + u32 saveHSYNC_B; + u32 saveVTOTAL_B; + u32 saveVBLANK_B; + u32 saveVSYNC_B; + u32 saveBCLRPAT_B; + u32 saveTRANSBCONF; + u32 saveTRANS_HTOTAL_B; + u32 saveTRANS_HBLANK_B; + u32 saveTRANS_HSYNC_B; + u32 saveTRANS_VTOTAL_B; + u32 saveTRANS_VBLANK_B; + u32 saveTRANS_VSYNC_B; + u32 savePIPEBSTAT; + u32 saveDSPBSTRIDE; + u32 saveDSPBSIZE; + u32 saveDSPBPOS; + u32 saveDSPBADDR; + u32 saveDSPBSURF; + u32 saveDSPBTILEOFF; + u32 saveVGA0; + u32 saveVGA1; + u32 saveVGA_PD; + u32 saveVGACNTRL; + u32 saveADPA; + u32 saveLVDS; + u32 savePP_ON_DELAYS; + u32 savePP_OFF_DELAYS; + u32 saveDVOA; + u32 saveDVOB; + u32 saveDVOC; + u32 savePP_ON; + u32 savePP_OFF; + u32 savePP_CONTROL; + u32 savePP_DIVISOR; + u32 savePFIT_CONTROL; + u32 save_palette_a[256]; + u32 save_palette_b[256]; + u32 saveDPFC_CB_BASE; + u32 saveFBC_CFB_BASE; + u32 saveFBC_LL_BASE; + u32 saveFBC_CONTROL; + u32 saveFBC_CONTROL2; + u32 saveIER; + u32 saveIIR; + u32 saveIMR; + u32 saveDEIER; + u32 saveDEIMR; + u32 saveGTIER; + u32 saveGTIMR; + u32 saveFDI_RXA_IMR; + u32 saveFDI_RXB_IMR; + u32 saveCACHE_MODE_0; + u32 saveMI_ARB_STATE; + u32 saveSWF0[16]; + u32 saveSWF1[16]; + u32 saveSWF2[3]; + u8 saveMSR; + u8 saveSR[8]; + u8 saveGR[25]; + u8 saveAR_INDEX; + u8 saveAR[21]; + u8 saveDACMASK; + u8 saveCR[37]; + uint64_t 
saveFENCE[I915_MAX_NUM_FENCES]; + u32 saveCURACNTR; + u32 saveCURAPOS; + u32 saveCURABASE; + u32 saveCURBCNTR; + u32 saveCURBPOS; + u32 saveCURBBASE; + u32 saveCURSIZE; + u32 saveDP_B; + u32 saveDP_C; + u32 saveDP_D; + u32 savePIPEA_GMCH_DATA_M; + u32 savePIPEB_GMCH_DATA_M; + u32 savePIPEA_GMCH_DATA_N; + u32 savePIPEB_GMCH_DATA_N; + u32 savePIPEA_DP_LINK_M; + u32 savePIPEB_DP_LINK_M; + u32 savePIPEA_DP_LINK_N; + u32 savePIPEB_DP_LINK_N; + u32 saveFDI_RXA_CTL; + u32 saveFDI_TXA_CTL; + u32 saveFDI_RXB_CTL; + u32 saveFDI_TXB_CTL; + u32 savePFA_CTL_1; + u32 savePFB_CTL_1; + u32 savePFA_WIN_SZ; + u32 savePFB_WIN_SZ; + u32 savePFA_WIN_POS; + u32 savePFB_WIN_POS; + u32 savePCH_DREF_CONTROL; + u32 saveDISP_ARB_CTL; + u32 savePIPEA_DATA_M1; + u32 savePIPEA_DATA_N1; + u32 savePIPEA_LINK_M1; + u32 savePIPEA_LINK_N1; + u32 savePIPEB_DATA_M1; + u32 savePIPEB_DATA_N1; + u32 savePIPEB_LINK_M1; + u32 savePIPEB_LINK_N1; + u32 saveMCHBAR_RENDER_STANDBY; + u32 savePCH_PORT_HOTPLUG; +}; + +struct intel_gen6_power_mgmt { + struct work_struct work; + u32 pm_iir; + /* lock - irqsave spinlock that protectects the work_struct and + * pm_iir. */ + spinlock_t lock; + + /* The below variables an all the rps hw state are protected by + * dev->struct mutext. */ + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 hw_max; + + struct delayed_work delayed_resume_work; + + /* + * Protects RPS/RC6 register access and PCU communication. + * Must be taken after struct_mutex if nested. + */ + struct mutex hw_lock; +}; + +/* defined intel_pm.c */ +extern spinlock_t mchdev_lock; + +struct intel_ilk_power_mgmt { + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + u8 corr; + + int c_m; + int r_t; + + struct drm_i915_gem_object *pwrctx; + struct drm_i915_gem_object *renderctx; +}; + +struct i915_dri1_state { + unsigned allow_batchbuffer : 1; + u32 __iomem *gfx_hws_cpu_addr; + + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; + + uint32_t counter; +}; + +struct intel_l3_parity { + u32 *remap_info; + struct work_struct error_work; +}; + +struct i915_gem_mm { + /** Memory allocator for GTT stolen memory */ + struct drm_mm stolen; + /** Memory allocator for GTT */ + struct drm_mm gtt_space; + /** List of all objects in gtt_space. Used to restore gtt + * mappings on resume */ + struct list_head bound_list; + /** + * List of objects which are not bound to the GTT (thus + * are idle and not used by the GPU) but still have + * (presumably uncached) pages still attached. + */ + struct list_head unbound_list; + + /** Usable portion of the GTT for GEM */ + unsigned long stolen_base; /* limited to low memory (32-bit) */ + + int gtt_mtrr; + + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_hw_ppgtt *aliasing_ppgtt; + + struct shrinker inactive_shrinker; + bool shrinker_no_lock_stealing; + + /** + * List of objects currently involved in rendering. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * LRU list of objects which are not in the ringbuffer and + * are ready to unbind, but are still in the GTT. 
+ * + * last_rendering_seqno is 0 while an object is in this list. + * + * A reference is not held on the buffer while on this list, + * as merely being GTT-bound shouldn't prevent its being + * freed, and we'll pull it off the list in the free path. + */ + struct list_head inactive_list; + + /** LRU list of objects with fence regs on them. */ + struct list_head fence_list; + + /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to + * fire periodically while the ring is running. When it + * fires, go retire requests. + */ + struct delayed_work retire_work; + + /** + * Are we in a non-interruptible section of code like + * modesetting? + */ + bool interruptible; + + /** + * Flag if the X Server, and thus DRM, is not currently in + * control of the device. + * + * This is set between LeaveVT and EnterVT. It needs to be + * replaced with a semaphore. It also needs to be + * transitioned away from for kernel modesetting. + */ + int suspended; + + /** Bit 6 swizzling required for X tiling */ + uint32_t bit_6_swizzle_x; + /** Bit 6 swizzling required for Y tiling */ + uint32_t bit_6_swizzle_y; + + /* storage for physical objects */ + struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; + + /* accounting, useful for userland debugging */ + size_t object_memory; + u32 object_count; +}; + +struct i915_gpu_error { + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + struct timer_list hangcheck_timer; + int hangcheck_count; + uint32_t last_acthd[I915_NUM_RINGS]; + uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; + + /* For reset and error_state handling. */ + spinlock_t lock; + /* Protected by the above dev->gpu_error.lock. */ + struct drm_i915_error_state *first_error; + struct work_struct work; + + unsigned long last_reset; + + /** + * State variable and reset counter controlling the reset flow + * + * Upper bits are for the reset counter. This counter is used by the + * wait_seqno code to race-free noticed that a reset event happened and + * that it needs to restart the entire ioctl (since most likely the + * seqno it waited for won't ever signal anytime soon). + * + * This is important for lock-free wait paths, where no contended lock + * naturally enforces the correct ordering between the bail-out of the + * waiter and the gpu reset work code. + * + * Lowest bit controls the reset state machine: Set means a reset is in + * progress. This state will (presuming we don't have any bugs) decay + * into either unset (successful reset) or the special WEDGED value (hw + * terminally sour). All waiters on the reset_queue will be woken when + * that happens. + */ + atomic_t reset_counter; + + /** + * Special values/flags for reset_counter + * + * Note that the code relies on + * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG + * being true. + */ +#define I915_RESET_IN_PROGRESS_FLAG 1 +#define I915_WEDGED 0xffffffff + + /** + * Waitqueue to signal when the reset has completed. Used by clients + * that wait for dev_priv->mm.wedged to settle. + */ + wait_queue_head_t reset_queue; + + /* For gpu hang simulation. 
*/ + unsigned int stop_rings; +}; + +enum modeset_restore { + MODESET_ON_LID_OPEN, + MODESET_DONE, + MODESET_SUSPENDED, +}; + +typedef struct drm_i915_private { + struct drm_device *dev; + struct kmem_cache *slab; + + const struct intel_device_info *info; + + int relative_constants_mode; + + void __iomem *regs; + + struct drm_i915_gt_funcs gt; + /** gt_fifo_count and the subsequent register write are synchronized + * with dev->struct_mutex. */ + unsigned gt_fifo_count; + /** forcewake_count is protected by gt_lock */ + unsigned forcewake_count; + /** gt_lock is also taken in irq contexts. */ + spinlock_t gt_lock; + + struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + + + /** gmbus_mutex protects against concurrent usage of the single hw gmbus + * controller on different i2c buses. */ + struct mutex gmbus_mutex; + + /** + * Base address of the gmbus and gpio block. + */ + uint32_t gpio_mmio_base; + + wait_queue_head_t gmbus_wait_queue; + + struct pci_dev *bridge_dev; + struct intel_ring_buffer ring[I915_NUM_RINGS]; + uint32_t last_seqno, next_seqno; + + drm_dma_handle_t *status_page_dmah; + struct resource mch_res; + + atomic_t irq_received; + + /* protects the irq masks */ + spinlock_t irq_lock; + + /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ + struct pm_qos_request pm_qos; + + /* DPIO indirect register protection */ + struct mutex dpio_lock; + + /** Cached value of IMR to avoid reads in updating the bitfield */ + u32 irq_mask; + u32 gt_irq_mask; + + struct work_struct hotplug_work; + bool enable_hotplug_processing; + struct { + unsigned long hpd_last_jiffies; + int hpd_cnt; + enum { + HPD_ENABLED = 0, + HPD_DISABLED = 1, + HPD_MARK_DISABLED = 2 + } hpd_mark; + } hpd_stats[HPD_NUM_PINS]; + struct timer_list hotplug_reenable_timer; + + int num_pch_pll; + int num_plane; + + unsigned long cfb_size; + unsigned int cfb_fb; + enum plane cfb_plane; + int cfb_y; + struct intel_fbc_work *fbc_work; + + struct intel_opregion opregion; + + /* overlay */ + struct intel_overlay *overlay; + unsigned int sprite_scaling_enabled; + + /* backlight */ + struct { + int level; + bool enabled; + struct backlight_device *device; + } backlight; + + /* LVDS info */ + struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ + + /* Feature bits from the VBIOS */ + unsigned int int_tv_support:1; + unsigned int lvds_dither:1; + unsigned int lvds_vbt:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + unsigned int display_clock_mode:1; + unsigned int fdi_rx_polarity_inverted:1; + int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; + bool no_aux_handshake; + + int crt_ddc_pin; + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ + int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + + unsigned int fsb_freq, mem_freq, is_ddr3; + + struct workqueue_struct *wq; + + /* Display functions */ + struct drm_i915_display_funcs display; + + /* PCH chipset type */ + enum intel_pch pch_type; + unsigned short pch_id; + + unsigned long quirks; + + enum modeset_restore modeset_restore; + struct mutex modeset_restore_lock; + + struct i915_gtt gtt; + + struct i915_gem_mm mm; + + /* Kernel Modesetting */ + + struct sdvo_device_mapping sdvo_mappings[2]; + /* 
indicate whether the LVDS_BORDER should be enabled or not */ + unsigned int lvds_border_bits; + /* Panel fitter placement and size for Ironlake+ */ + u32 pch_pf_pos, pch_pf_size; + + struct drm_crtc *plane_to_crtc_mapping[3]; + struct drm_crtc *pipe_to_crtc_mapping[3]; + wait_queue_head_t pending_flip_queue; + + struct intel_pch_pll pch_plls[I915_NUM_PLLS]; + struct intel_ddi_plls ddi_plls; + + /* Reclocking support */ + bool render_reclock_avail; + bool lvds_downclock_avail; + /* indicates the reduced downclock for LVDS*/ + int lvds_downclock; + u16 orig_clock; + int child_dev_num; + struct child_device_config *child_dev; + + bool mchbar_need_disable; + + struct intel_l3_parity l3_parity; + + /* gen6+ rps state */ + struct intel_gen6_power_mgmt rps; + + /* ilk-only ips/rps state. Everything in here is protected by the global + * mchdev_lock in intel_pm.c */ + struct intel_ilk_power_mgmt ips; + + enum no_fbc_reason no_fbc_reason; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; + + struct i915_gpu_error gpu_error; + + /* list of fbdev register on this device */ + struct intel_fbdev *fbdev; + + /* + * The console may be contended at resume, but we don't + * want it to block on it. + */ + struct work_struct console_resume_work; + + struct drm_property *broadcast_rgb_property; + struct drm_property *force_audio_property; + + bool hw_contexts_disabled; + uint32_t hw_context_size; + + u32 fdi_rx_config; + + struct i915_suspend_saved_registers regfile; + + /* Old dri1 support infrastructure, beware the dragons ya fools entering + * here! */ + struct i915_dri1_state dri1; +} drm_i915_private_t; + +/* Iterate over initialised rings */ +#define for_each_ring(ring__, dev_priv__, i__) \ + for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ + if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) + +enum hdmi_force_audio { + HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ + HDMI_AUDIO_OFF, /* force turn off HDMI audio */ + HDMI_AUDIO_AUTO, /* trust EDID */ + HDMI_AUDIO_ON, /* force turn on HDMI audio */ +}; + +#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) + +struct drm_i915_gem_object_ops { + /* Interface between the GEM object and its backing storage. + * get_pages() is called once prior to the use of the associated set + * of pages before to binding them into the GTT, and put_pages() is + * called after we no longer need them. As we expect there to be + * associated cost with migrating pages between the backing storage + * and making them available for the GPU (e.g. clflush), we may hold + * onto the pages after they are no longer referenced by the GPU + * in case they may be used again shortly (for example migrating the + * pages to a different memory domain within the GTT). put_pages() + * will therefore most likely be called when the object itself is + * being released or under memory pressure (where we attempt to + * reap pages for the shrinker). + */ + int (*get_pages)(struct drm_i915_gem_object *); + void (*put_pages)(struct drm_i915_gem_object *); +}; + +struct drm_i915_gem_object { + struct drm_gem_object base; + + const struct drm_i915_gem_object_ops *ops; + + /** Current space allocated to this object in the GTT, if any. */ + struct drm_mm_node *gtt_space; + /** Stolen memory for this object, instead of being backed by shmem. 
*/ + struct drm_mm_node *stolen; + struct list_head gtt_list; + + /** This object's place on the active/inactive lists */ + struct list_head ring_list; + struct list_head mm_list; + /** This object's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; + + /** + * This is set if the object is on the active lists (has pending + * rendering and so a non-zero seqno), and is not set if it i s on + * inactive (ready to be unbound) list. + */ + unsigned int active:1; + + /** + * This is set if the object has been written to since last bound + * to the GTT + */ + unsigned int dirty:1; + + /** + * Fence register bits (if any) for this object. Will be set + * as needed when mapped into the GTT. + * Protected by dev->struct_mutex. + */ + signed int fence_reg:I915_MAX_NUM_FENCE_BITS; + + /** + * Advice: are the backing pages purgeable? + */ + unsigned int madv:2; + + /** + * Current tiling mode for the object. + */ + unsigned int tiling_mode:2; + /** + * Whether the tiling parameters for the currently associated fence + * register have changed. Note that for the purposes of tracking + * tiling changes we also treat the unfenced register, the register + * slot that the object occupies whilst it executes a fenced + * command (such as BLT on gen2/3), as a "fence". + */ + unsigned int fence_dirty:1; + + /** How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, pin_ioctl + * (via user_pin_count), execbuffer (objects are not allowed multiple + * times for the same batchbuffer), and the framebuffer code. When + * switching/pageflipping, the framebuffer code has at most two buffers + * pinned per crtc. + * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. */ + unsigned int pin_count:4; +#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + + /** + * Is the object at the current location in the gtt mappable and + * fenceable? Used to avoid costly recalculations. + */ + unsigned int map_and_fenceable:1; + + /** + * Whether the current gtt mapping needs to be mappable (and isn't just + * mappable by accident). Track pin and fault separate for a more + * accurate mappable working set. + */ + unsigned int fault_mappable:1; + unsigned int pin_mappable:1; + + /* + * Is the GPU currently using a fence to access this buffer, + */ + unsigned int pending_fenced_gpu_access:1; + unsigned int fenced_gpu_access:1; + + unsigned int cache_level:2; + + unsigned int has_aliasing_ppgtt_mapping:1; + unsigned int has_global_gtt_mapping:1; + unsigned int has_dma_mapping:1; + + struct sg_table *pages; + int pages_pin_count; + + /* prime dma-buf support */ + void *dma_buf_vmapping; + int vmapping_count; + + /** + * Used for performing relocations during execbuffer insertion. + */ + struct hlist_node exec_node; + unsigned long exec_handle; + struct drm_i915_gem_exec_object2 *exec_entry; + + /** + * Current offset of the object in GTT space. + * + * This is the same as gtt_space->start + */ + uint32_t gtt_offset; + + struct intel_ring_buffer *ring; + + /** Breadcrumb of last rendering to the buffer. */ + uint32_t last_read_seqno; + uint32_t last_write_seqno; + /** Breadcrumb of last fenced GPU access to the buffer. */ + uint32_t last_fenced_seqno; + + /** Current tiling stride for the object, if it's tiled. */ + uint32_t stride; + + /** Record of address bit 17 of each page at last unbind. 
*/ + unsigned long *bit_17; + + /** User space pin count and filp owning the pin */ + uint32_t user_pin_count; + struct drm_file *pin_filp; + + /** for phy allocated objects */ + struct drm_i915_gem_phys_object *phys_obj; +}; +#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) + +#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) + +/** + * Request queue structure. + * + * The request queue allows us to note sequence numbers that have been emitted + * and may be associated with active buffers to be retired. + * + * By keeping this list, we can avoid having to do questionable + * sequence-number comparisons on buffer last_rendering_seqnos, and associate + * an emission time with seqnos for tracking how far ahead of the GPU we are. + */ +struct drm_i915_gem_request { + /** On Which ring this request was generated */ + struct intel_ring_buffer *ring; + + /** GEM sequence number associated with this request. */ + uint32_t seqno; + + /** Postion in the ringbuffer of the end of the request */ + u32 tail; + + /** Time at which this request was emitted, in jiffies. */ + unsigned long emitted_jiffies; + + /** global list entry for this request */ + struct list_head list; + + struct drm_i915_file_private *file_priv; + /** file_priv list entry for this request */ + struct list_head client_list; +}; + +struct drm_i915_file_private { + struct { + spinlock_t lock; + struct list_head request_list; + } mm; + struct idr context_idr; +}; + +#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) + +#define IS_I830(dev) ((dev)->pci_device == 0x3577) +#define IS_845G(dev) ((dev)->pci_device == 0x2562) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) +#define IS_I865G(dev) ((dev)->pci_device == 0x2572) +#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) +#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) +#define IS_I945G(dev) ((dev)->pci_device == 0x2772) +#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) +#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) +#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) +#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) +#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) +#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) +#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) +#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) +#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) +#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) +#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) +#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) +#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ + (dev)->pci_device == 0x0152 || \ + (dev)->pci_device == 0x015a) +#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ + (dev)->pci_device == 0x0106 || \ + (dev)->pci_device == 0x010A) +#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) +#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) +#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_ULT(dev) (IS_HASWELL(dev) && \ + ((dev)->pci_device & 0xFF00) == 0x0A00) + +/* + * The genX designation typically refers to the render engine, so render + * capability related checks should use IS_GEN, while display and other checks + * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular + * chips, etc.). 
+ */ +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) +#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) + +#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) +#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) +#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) + +#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) + +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) + +/* Early gen2 have a totally busted CS tlb and require pinned batches. */ +#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) + +/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte + * rows, which changed the alignment requirements and fence programming. + */ +#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ + IS_I915GM(dev))) +#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) +#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) +#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) +/* dsparb controlled by hw only */ +#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) + +#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) +#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) +#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) + +#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) + +#define HAS_DDI(dev) (IS_HASWELL(dev)) +#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) + +#define INTEL_PCH_DEVICE_ID_MASK 0xff00 +#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 +#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 +#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 +#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 + +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) +#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) +#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) + +#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) + +#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + +#define GT_FREQUENCY_MULTIPLIER 50 + +#include "i915_trace.h" + +/** + * RC6 is a special power stage which allows the GPU to enter an very + * low-voltage mode when idle, using down to 0V while at this stage. This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. 
+ * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. + */ +#define INTEL_RC6_ENABLE (1<<0) +#define INTEL_RC6p_ENABLE (1<<1) +#define INTEL_RC6pp_ENABLE (1<<2) + +extern struct drm_ioctl_desc i915_ioctls[]; +extern int i915_max_ioctl; +extern unsigned int i915_fbpercrtc __always_unused; +extern int i915_panel_ignore_lid __read_mostly; +extern unsigned int i915_powersave __read_mostly; +extern int i915_semaphores __read_mostly; +extern unsigned int i915_lvds_downclock __read_mostly; +extern int i915_lvds_channel_mode __read_mostly; +extern int i915_panel_use_ssc __read_mostly; +extern int i915_vbt_sdvo_panel_type __read_mostly; +extern int i915_enable_rc6 __read_mostly; +extern int i915_enable_fbc __read_mostly; +extern bool i915_enable_hangcheck __read_mostly; +extern int i915_enable_ppgtt __read_mostly; +extern unsigned int i915_preliminary_hw_support __read_mostly; +extern int i915_disable_power_well __read_mostly; + +extern int i915_suspend(struct drm_device *dev, pm_message_t state); +extern int i915_resume(struct drm_device *dev); +extern int i915_master_create(struct drm_device *dev, struct drm_master *master); +extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); + + /* i915_dma.c */ +void i915_update_dri1_breadcrumb(struct drm_device *dev); +extern void i915_kernel_lost_context(struct drm_device * dev); +extern int i915_driver_load(struct drm_device *, unsigned long flags); +extern int i915_driver_unload(struct drm_device *); +extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); +extern void i915_driver_lastclose(struct drm_device * dev); +extern void i915_driver_preclose(struct drm_device *dev, + struct drm_file *file_priv); +extern void i915_driver_postclose(struct drm_device *dev, + struct drm_file *file_priv); +extern int i915_driver_device_is_agp(struct drm_device * dev); +#ifdef CONFIG_COMPAT +extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); +#endif +extern int i915_emit_box(struct drm_device *dev, + struct drm_clip_rect *box, + int DR1, int DR4); +extern int intel_gpu_reset(struct drm_device *dev); +extern int i915_reset(struct drm_device *dev); +extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); +extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); +extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); + +extern void intel_console_resume(struct work_struct *work); + +/* i915_irq.c */ +void i915_hangcheck_elapsed(unsigned long data); +void i915_handle_error(struct drm_device *dev, bool wedged); + +extern void intel_irq_init(struct drm_device *dev); +extern void intel_pm_init(struct drm_device *dev); +extern void intel_hpd_init(struct drm_device *dev); +extern void intel_gt_init(struct drm_device *dev); +extern void intel_gt_sanitize(struct drm_device *dev); + +void i915_error_state_free(struct kref *error_ref); + +void +i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); + +void +i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); + 
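As a hedged illustration of how the pipestat helpers declared above are driven (the calling context and the vblank enable bit are assumed from i915_irq.c and i915_reg.h; nothing here is introduced by this patch):

	/*
	 * Illustrative caller (not added by this patch): pipestat updates are
	 * done under dev_priv->irq_lock, e.g. when enabling vblank
	 * interrupts on a pipe:
	 *
	 *	unsigned long irqflags;
	 *
	 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	 *	i915_enable_pipestat(dev_priv, pipe,
	 *			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	 *
	 * PIPE_START_VBLANK_INTERRUPT_ENABLE is assumed to come from
	 * i915_reg.h.
	 */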
+void intel_enable_asle(struct drm_device *dev); + +#ifdef CONFIG_DEBUG_FS +extern void i915_destroy_error_state(struct drm_device *dev); +#else +#define i915_destroy_error_state(x) +#endif + + +/* i915_gem.c */ +int i915_gem_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_set_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void i915_gem_load(struct drm_device *dev); +void *i915_gem_object_alloc(struct drm_device *dev); +void i915_gem_object_free(struct drm_i915_gem_object *obj); +int i915_gem_init_object(struct drm_gem_object *obj); +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops); +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size); +void i915_gem_free_object(struct drm_gem_object *obj); + +int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable, + bool nonblocking); +void i915_gem_object_unpin(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); +void i915_gem_release_mmap(struct drm_i915_gem_object *obj); +void i915_gem_lastclose(struct drm_device *dev); + +int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) +{ + struct sg_page_iter sg_iter; + + for_each_sg_page(obj->pages->sgl, &sg_iter, 
obj->pages->nents, n) + return sg_page_iter_page(&sg_iter); + + return NULL; +} +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages == NULL); + obj->pages_pin_count++; +} +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages_pin_count == 0); + obj->pages_pin_count--; +} + +int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +int i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to); +void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring); + +int i915_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle, uint64_t *offset); +int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle); +/** + * Returns true if seq1 is later than seq2. + */ +static inline bool +i915_seqno_passed(uint32_t seq1, uint32_t seq2) +{ + return (int32_t)(seq1 - seq2) >= 0; +} + +int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); +int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); +int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); + +static inline bool +i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) +{ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + dev_priv->fence_regs[obj->fence_reg].pin_count++; + return true; + } else + return false; +} + +static inline void +i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) +{ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + dev_priv->fence_regs[obj->fence_reg].pin_count--; + } +} + +void i915_gem_retire_requests(struct drm_device *dev); +void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); +int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, + bool interruptible); +static inline bool i915_reset_in_progress(struct i915_gpu_error *error) +{ + return unlikely(atomic_read(&error->reset_counter) + & I915_RESET_IN_PROGRESS_FLAG); +} + +static inline bool i915_terminally_wedged(struct i915_gpu_error *error) +{ + return atomic_read(&error->reset_counter) == I915_WEDGED; +} + +void i915_gem_reset(struct drm_device *dev); +void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain); +int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); +int __must_check i915_gem_init(struct drm_device *dev); +int __must_check i915_gem_init_hw(struct drm_device *dev); +void i915_gem_l3_remap(struct drm_device *dev); +void i915_gem_init_swizzling(struct drm_device *dev); +void i915_gem_cleanup_ringbuffer(struct drm_device *dev); +int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_idle(struct drm_device *dev); +int i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + u32 *seqno); +int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, + uint32_t seqno); +int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +int __must_check +i915_gem_object_set_to_gtt_domain(struct 
drm_i915_gem_object *obj, + bool write); +int __must_check +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); +int __must_check +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + struct intel_ring_buffer *pipelined); +int i915_gem_attach_phys_object(struct drm_device *dev, + struct drm_i915_gem_object *obj, + int id, + int align); +void i915_gem_detach_phys_object(struct drm_device *dev, + struct drm_i915_gem_object *obj); +void i915_gem_free_all_phys_object(struct drm_device *dev); +void i915_gem_release(struct drm_device *dev, struct drm_file *file); + +uint32_t +i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); +uint32_t +i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, + int tiling_mode, bool fenced); + +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); + +struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct dma_buf *i915_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gem_obj, int flags); + +void i915_gem_restore_fences(struct drm_device *dev); + +/* i915_gem_context.c */ +void i915_gem_context_init(struct drm_device *dev); +void i915_gem_context_fini(struct drm_device *dev); +void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); +int i915_switch_context(struct intel_ring_buffer *ring, + struct drm_file *file, int to_id); +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +/* i915_gem_gtt.c */ +void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); +void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); +void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj); + +void i915_gem_restore_gtt_mappings(struct drm_device *dev); +int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +void i915_gem_init_global_gtt(struct drm_device *dev); +void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, + unsigned long mappable_end, unsigned long end); +int i915_gem_gtt_init(struct drm_device *dev); +static inline void i915_gem_chipset_flush(struct drm_device *dev) +{ + if (INTEL_INFO(dev)->gen < 6) + intel_gtt_chipset_flush(); +} + + +/* i915_gem_evict.c */ +int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, + unsigned cache_level, + bool mappable, + bool nonblock); +int i915_gem_evict_everything(struct drm_device *dev); + +/* i915_gem_stolen.c */ +int i915_gem_init_stolen(struct drm_device *dev); +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); +void i915_gem_stolen_cleanup_compression(struct drm_device *dev); +void i915_gem_cleanup_stolen(struct drm_device *dev); +struct drm_i915_gem_object * +i915_gem_object_create_stolen(struct drm_device *dev, u32 size); +struct drm_i915_gem_object * +i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, + u32 stolen_offset, + u32 gtt_offset, + u32 size); +void 
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); + +/* i915_gem_tiling.c */ +inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + obj->tiling_mode != I915_TILING_NONE; +} + +void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); + +/* i915_gem_debug.c */ +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, + const char *where, uint32_t mark); +#if WATCH_LISTS +int i915_verify_lists(struct drm_device *dev); +#else +#define i915_verify_lists(dev) 0 +#endif +void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, + int handle); +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, + const char *where, uint32_t mark); + +/* i915_debugfs.c */ +int i915_debugfs_init(struct drm_minor *minor); +void i915_debugfs_cleanup(struct drm_minor *minor); + +/* i915_suspend.c */ +extern int i915_save_state(struct drm_device *dev); +extern int i915_restore_state(struct drm_device *dev); + +/* i915_ums.c */ +void i915_save_display_reg(struct drm_device *dev); +void i915_restore_display_reg(struct drm_device *dev); + +/* i915_sysfs.c */ +void i915_setup_sysfs(struct drm_device *dev_priv); +void i915_teardown_sysfs(struct drm_device *dev_priv); + +/* intel_i2c.c */ +extern int intel_setup_gmbus(struct drm_device *dev); +extern void intel_teardown_gmbus(struct drm_device *dev); +extern inline bool intel_gmbus_is_port_valid(unsigned port) +{ + return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); +} + +extern struct i2c_adapter *intel_gmbus_get_adapter( + struct drm_i915_private *dev_priv, unsigned port); +extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); +extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); +extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) +{ + return container_of(adapter, struct intel_gmbus, adapter)->force_bit; +} +extern void intel_i2c_reset(struct drm_device *dev); + +/* intel_opregion.c */ +extern int intel_opregion_setup(struct drm_device *dev); +#ifdef CONFIG_ACPI +extern void intel_opregion_init(struct drm_device *dev); +extern void intel_opregion_fini(struct drm_device *dev); +extern void intel_opregion_asle_intr(struct drm_device *dev); +extern void intel_opregion_gse_intr(struct drm_device *dev); +extern void intel_opregion_enable_asle(struct drm_device *dev); +#else +static inline void intel_opregion_init(struct drm_device *dev) { return; } +static inline void intel_opregion_fini(struct drm_device *dev) { return; } +static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } +#endif + +/* intel_acpi.c */ +#ifdef CONFIG_ACPI +extern void intel_register_dsm_handler(void); +extern void intel_unregister_dsm_handler(void); +#else +static inline void intel_register_dsm_handler(void) { return; } +static inline void intel_unregister_dsm_handler(void) { return; } +#endif /* CONFIG_ACPI */ + +/* modesetting */ +extern void intel_modeset_init_hw(struct drm_device *dev); +extern void intel_modeset_init(struct drm_device *dev); +extern void 
intel_modeset_gem_init(struct drm_device *dev); +extern void intel_modeset_cleanup(struct drm_device *dev); +extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); +extern void intel_modeset_setup_hw_state(struct drm_device *dev, + bool force_restore); +extern void i915_redisable_vga(struct drm_device *dev); +extern bool intel_fbc_enabled(struct drm_device *dev); +extern void intel_disable_fbc(struct drm_device *dev); +extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern void intel_init_pch_refclk(struct drm_device *dev); +extern void gen6_set_rps(struct drm_device *dev, u8 val); +extern void intel_detect_pch(struct drm_device *dev); +extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); +extern int intel_enable_rc6(const struct drm_device *dev); + +extern bool i915_semaphore_is_enabled(struct drm_device *dev); +int i915_reg_read_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +/* overlay */ +#ifdef CONFIG_DEBUG_FS +extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + +extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern void intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error); +#endif + +/* On SNB platform, before reading ring registers forcewake bit + * must be set to prevent GT core from power down and stale values being + * returned. + */ +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); +int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); + +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); +int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val); +int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); + +#define __i915_read(x, y) \ + u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); + +__i915_read(8, b) +__i915_read(16, w) +__i915_read(32, l) +__i915_read(64, q) +#undef __i915_read + +#define __i915_write(x, y) \ + void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); + +__i915_write(8, b) +__i915_write(16, w) +__i915_write(32, l) +__i915_write(64, q) +#undef __i915_write + +#define I915_READ8(reg) i915_read8(dev_priv, (reg)) +#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) + +#define I915_READ16(reg) i915_read16(dev_priv, (reg)) +#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) +#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) +#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define I915_READ(reg) i915_read32(dev_priv, (reg)) +#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) +#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) +#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) + +#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) +#define I915_READ64(reg) i915_read64(dev_priv, (reg)) + +#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) +#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) + +/* "Broadcast RGB" property */ +#define INTEL_BROADCAST_RGB_AUTO 0 +#define INTEL_BROADCAST_RGB_FULL 1 +#define 
INTEL_BROADCAST_RGB_LIMITED 2 + +static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) +{ + if (HAS_PCH_SPLIT(dev)) + return CPU_VGACNTRL; + else if (IS_VALLEYVIEW(dev)) + return VLV_VGACNTRL; + else + return VGACNTRL; +} + +static inline void __user *to_user_ptr(u64 address) +{ + return (void __user *)(uintptr_t)address; +} + +static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) +{ + unsigned long j = msecs_to_jiffies(m); + + return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + +static inline unsigned long +timespec_to_jiffies_timeout(const struct timespec *value) +{ + unsigned long j = timespec_to_jiffies(value); + + return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c new file mode 100644 index 0000000..5d5a230 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -0,0 +1,4525 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + * + */ + +#include +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include +#include +#include +#include +#include + +static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking); +static int i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file); + +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj); +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, + struct drm_i915_fence_reg *fence, + bool enable); + +static int i915_gem_inactive_shrink(struct shrinker *shrinker, + struct shrink_control *sc); +static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); +static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); +static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); + +static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) +{ + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + /* As we do not have an associated fence register, we will force + * a tiling change if we ever need to acquire one. + */ + obj->fence_dirty = false; + obj->fence_reg = I915_FENCE_REG_NONE; +} + +/* some bookkeeping */ +static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count++; + dev_priv->mm.object_memory += size; +} + +static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count--; + dev_priv->mm.object_memory -= size; +} + +static int +i915_gem_wait_for_error(struct i915_gpu_error *error) +{ + int ret; + +#define EXIT_COND (!i915_reset_in_progress(error) || \ + i915_terminally_wedged(error)) + if (EXIT_COND) + return 0; + + /* + * Only wait 10 seconds for the gpu reset to complete to avoid hanging + * userspace. If it takes that long something really bad is going on and + * we should simply try to bail out and fail as gracefully as possible. + */ + ret = wait_event_interruptible_timeout(error->reset_queue, + EXIT_COND, + 10*HZ); + if (ret == 0) { + DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); + return -EIO; + } else if (ret < 0) { + return ret; + } +#undef EXIT_COND + + return 0; +} + +int i915_mutex_lock_interruptible(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = i915_gem_wait_for_error(&dev_priv->gpu_error); + if (ret) + return ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + WARN_ON(i915_verify_lists(dev)); + return 0; +} + +static inline bool +i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) +{ + return obj->gtt_space && !obj->active; +} + +int +i915_gem_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_init *args = data; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + if (args->gtt_start >= args->gtt_end || + (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) + return -EINVAL; + + /* GEM with user mode setting was never supported on ilk and later. 
*/ + if (INTEL_INFO(dev)->gen >= 5) + return -ENODEV; + + mutex_lock(&dev->struct_mutex); + i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, + args->gtt_end); + dev_priv->gtt.mappable_end = args->gtt_end; + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int +i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_get_aperture *args = data; + struct drm_i915_gem_object *obj; + size_t pinned; + + pinned = 0; + mutex_lock(&dev->struct_mutex); + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + if (obj->pin_count) + pinned += obj->gtt_space->size; + mutex_unlock(&dev->struct_mutex); + + args->aper_size = dev_priv->gtt.total; + args->aper_available_size = args->aper_size - pinned; + + return 0; +} + +void *i915_gem_object_alloc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); +} + +void i915_gem_object_free(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + kmem_cache_free(dev_priv->slab, obj); +} + +static int +i915_gem_create(struct drm_file *file, + struct drm_device *dev, + uint64_t size, + uint32_t *handle_p) +{ + struct drm_i915_gem_object *obj; + int ret; + u32 handle; + + size = roundup(size, PAGE_SIZE); + if (size == 0) + return -EINVAL; + + /* Allocate the new object */ + obj = i915_gem_alloc_object(dev, size); + if (obj == NULL) + return -ENOMEM; + + ret = drm_gem_handle_create(file, &obj->base, &handle); + if (ret) { + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev->dev_private, obj->base.size); + i915_gem_object_free(obj); + return ret; + } + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference(&obj->base); + trace_i915_gem_object_create(obj); + + *handle_p = handle; + return 0; +} + +int +i915_gem_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + /* have to work out size/pitch and return them */ + args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); + args->size = args->pitch * args->height; + return i915_gem_create(file, dev, + args->size, &args->handle); +} + +int i915_gem_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} + +/** + * Creates a new mm object and returns a handle to it. 
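For illustration only (not part of the patch): a minimal user-space sketch of the pitch/size arithmetic that i915_gem_dumb_create() above performs, rounding the row pitch up to 64 bytes and multiplying by the height. The resolution and bpp values are made up for the example.

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint32_t width = 1920, height = 1080, bpp = 32;          /* example values */
		uint64_t pitch = ALIGN_UP(width * ((bpp + 7) / 8), 64);  /* bytes per row, 64-byte aligned */
		uint64_t size  = pitch * height;                         /* total dumb buffer size */

		printf("pitch=%llu size=%llu\n",
		       (unsigned long long)pitch, (unsigned long long)size);
		return 0;
	}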
+ */ +int +i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_create *args = data; + + return i915_gem_create(file, dev, + args->size, &args->handle); +} + +static inline int +__copy_to_user_swizzled(char __user *cpu_vaddr, + const char *gpu_vaddr, int gpu_offset, + int length) +{ + int ret, cpu_offset = 0; + + while (length > 0) { + int cacheline_end = ALIGN(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + ret = __copy_to_user(cpu_vaddr + cpu_offset, + gpu_vaddr + swizzled_gpu_offset, + this_length); + if (ret) + return ret + length; + + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + return 0; +} + +static inline int +__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, + const char __user *cpu_vaddr, + int length) +{ + int ret, cpu_offset = 0; + + while (length > 0) { + int cacheline_end = ALIGN(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, + cpu_vaddr + cpu_offset, + this_length); + if (ret) + return ret + length; + + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + return 0; +} + +/* Per-page copy function for the shmem pread fastpath. + * Flushes invalid cachelines before reading the target if + * needs_clflush is set. */ +static int +shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) +{ + char *vaddr; + int ret; + + if (unlikely(page_do_bit17_swizzling)) + return -EINVAL; + + vaddr = kmap_atomic(page); + if (needs_clflush) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_to_user_inatomic(user_data, + vaddr + shmem_page_offset, + page_length); + kunmap_atomic(vaddr); + + return ret ? -EFAULT : 0; +} + +static void +shmem_clflush_swizzled_range(char *addr, unsigned long length, + bool swizzled) +{ + if (unlikely(swizzled)) { + unsigned long start = (unsigned long) addr; + unsigned long end = (unsigned long) addr + length; + + /* For swizzling simply ensure that we always flush both + * channels. Lame, but simple and it works. Swizzled + * pwrite/pread is far from a hotpath - current userspace + * doesn't use it at all. */ + start = round_down(start, 128); + end = round_up(end, 128); + + drm_clflush_virt_range((void *)start, end - start); + } else { + drm_clflush_virt_range(addr, length); + } + +} + +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. */ +static int +shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, bool needs_clflush) +{ + char *vaddr; + int ret; + + vaddr = kmap(page); + if (needs_clflush) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); + + if (page_do_bit17_swizzling) + ret = __copy_to_user_swizzled(user_data, + vaddr, shmem_page_offset, + page_length); + else + ret = __copy_to_user(user_data, + vaddr + shmem_page_offset, + page_length); + kunmap(page); + + return ret ? 
- EFAULT : 0; +} + +static int +i915_gem_shmem_pread(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file) +{ + char __user *user_data; + ssize_t remain; + loff_t offset; + int shmem_page_offset, page_length, ret = 0; + int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int prefaulted = 0; + int needs_clflush = 0; + struct sg_page_iter sg_iter; + + user_data = to_user_ptr(args->data_ptr); + remain = args->size; + + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. */ + if (obj->cache_level == I915_CACHE_NONE) + needs_clflush = 1; + if (obj->gtt_space) { + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + return ret; + } + } + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + offset = args->offset; + + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, + offset >> PAGE_SHIFT) { + struct page *page = sg_page_iter_page(&sg_iter); + + if (remain <= 0) + break; + + /* Operation in this page + * + * shmem_page_offset = offset within page in shmem file + * page_length = bytes to copy for this page + */ + shmem_page_offset = offset_in_page(offset); + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + + page_do_bit17_swizzling = obj_do_bit17_swizzling && + (page_to_phys(page) & (1 << 17)) != 0; + + ret = shmem_pread_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); + if (ret == 0) + goto next_page; + + mutex_unlock(&dev->struct_mutex); + + if (!prefaulted) { + ret = fault_in_multipages_writeable(user_data, remain); + /* Userspace is tricking us, but we've already clobbered + * its pages with the prefault and promised to write the + * data up to the first fault. Hence ignore any errors + * and just continue. */ + (void)ret; + prefaulted = 1; + } + + ret = shmem_pread_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + needs_clflush); + + mutex_lock(&dev->struct_mutex); + +next_page: + mark_page_accessed(page); + + if (ret) + goto out; + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +out: + i915_gem_object_unpin_pages(obj); + + return ret; +} + +/** + * Reads data from the object referenced by handle. + * + * On error, the contents of *data are undefined. + */ +int +i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_pread *args = data; + struct drm_i915_gem_object *obj; + int ret = 0; + + if (args->size == 0) + return 0; + + if (!access_ok(VERIFY_WRITE, + to_user_ptr(args->data_ptr), + args->size)) + return -EFAULT; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Bounds check source. */ + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { + ret = -EINVAL; + goto out; + } + + /* prime objects have no backing filp to GEM pread/pwrite + * pages from. 
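For illustration only (not part of the patch): a standalone sketch of the bit-17 swizzle handling used by the pread helpers above. Pages whose physical address has bit 17 set have the two 64-byte halves of each 128-byte block swapped, which the swizzled copy loop undoes by XOR-ing the per-cacheline offset with 64. The address and offset values here are illustrative.

	#include <stdint.h>
	#include <stdio.h>

	/* Does this page need the bit-17 swizzle applied? (mirrors the check above) */
	static int page_needs_bit17_swizzle(uint64_t page_phys)
	{
		return (page_phys & (1ull << 17)) != 0;
	}

	/* Map a cacheline-aligned offset to where the data actually lives:
	 * XOR with 64 swaps the two 64-byte halves of each 128-byte block. */
	static uint32_t swizzle_offset(uint32_t offset)
	{
		return offset ^ 64;
	}

	int main(void)
	{
		uint64_t phys = 0x20000ull;	/* example physical address, bit 17 set */
		uint32_t off = 0x80;		/* example cacheline offset within the page */

		if (page_needs_bit17_swizzle(phys))
			off = swizzle_offset(off);
		printf("effective offset: 0x%x\n", (unsigned)off);
		return 0;
	}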
+ */ + if (!obj->base.filp) { + ret = -EINVAL; + goto out; + } + + trace_i915_gem_object_pread(obj, args->offset, args->size); + + ret = i915_gem_shmem_pread(dev, obj, args, file); + +out: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* This is the fast write path which cannot handle + * page faults in the source data + */ + +static inline int +fast_user_write(struct io_mapping *mapping, + loff_t page_base, int page_offset, + char __user *user_data, + int length) +{ + void __iomem *vaddr_atomic; + void *vaddr; + unsigned long unwritten; + + vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); + /* We can use the cpu mem copy function because this is X86. */ + vaddr = (void __force*)vaddr_atomic + page_offset; + unwritten = __copy_from_user_inatomic_nocache(vaddr, + user_data, length); + io_mapping_unmap_atomic(vaddr_atomic); + return unwritten; +} + +/** + * This is the fast pwrite path, where we copy the data directly from the + * user into the GTT, uncached. + */ +static int +i915_gem_gtt_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length, ret; + + ret = i915_gem_object_pin(obj, 0, true, true); + if (ret) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin; + + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + + user_data = to_user_ptr(args->data_ptr); + remain = args->size; + + offset = obj->gtt_offset + args->offset; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = offset & PAGE_MASK; + page_offset = offset_in_page(offset); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (fast_user_write(dev_priv->gtt.mappable, page_base, + page_offset, user_data, page_length)) { + ret = -EFAULT; + goto out_unpin; + } + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +out_unpin: + i915_gem_object_unpin(obj); +out: + return ret; +} + +/* Per-page copy function for the shmem pwrite fastpath. + * Flushes invalid cachelines before writing to the target if + * needs_clflush_before is set and flushes out any written cachelines after + * writing if needs_clflush is set. */ +static int +shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + int ret; + + if (unlikely(page_do_bit17_swizzling)) + return -EINVAL; + + vaddr = kmap_atomic(page); + if (needs_clflush_before) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + drm_clflush_virt_range(vaddr + shmem_page_offset, + page_length); + kunmap_atomic(vaddr); + + return ret ? 
-EFAULT : 0; +} + +/* Only difference to the fast-path function is that this can handle bit17 + * and uses non-atomic copy and kmap functions. */ +static int +shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, + char __user *user_data, + bool page_do_bit17_swizzling, + bool needs_clflush_before, + bool needs_clflush_after) +{ + char *vaddr; + int ret; + + vaddr = kmap(page); + if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); + if (page_do_bit17_swizzling) + ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, + user_data, + page_length); + else + ret = __copy_from_user(vaddr + shmem_page_offset, + user_data, + page_length); + if (needs_clflush_after) + shmem_clflush_swizzled_range(vaddr + shmem_page_offset, + page_length, + page_do_bit17_swizzling); + kunmap(page); + + return ret ? -EFAULT : 0; +} + +static int +i915_gem_shmem_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file) +{ + ssize_t remain; + loff_t offset; + char __user *user_data; + int shmem_page_offset, page_length, ret = 0; + int obj_do_bit17_swizzling, page_do_bit17_swizzling; + int hit_slowpath = 0; + int needs_clflush_after = 0; + int needs_clflush_before = 0; + struct sg_page_iter sg_iter; + + user_data = to_user_ptr(args->data_ptr); + remain = args->size; + + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + /* If we're not in the cpu write domain, set ourself into the gtt + * write domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. */ + if (obj->cache_level == I915_CACHE_NONE) + needs_clflush_after = 1; + if (obj->gtt_space) { + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + } + } + /* Same trick applies for invalidate partially written cachelines before + * writing. */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) + && obj->cache_level == I915_CACHE_NONE) + needs_clflush_before = 1; + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + offset = args->offset; + obj->dirty = 1; + + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, + offset >> PAGE_SHIFT) { + struct page *page = sg_page_iter_page(&sg_iter); + int partial_cacheline_write; + + if (remain <= 0) + break; + + /* Operation in this page + * + * shmem_page_offset = offset within page in shmem file + * page_length = bytes to copy for this page + */ + shmem_page_offset = offset_in_page(offset); + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + + /* If we don't overwrite a cacheline completely we need to be + * careful to have up-to-date data by first clflushing. Don't + * overcomplicate things and flush the entire patch. 
*/ + partial_cacheline_write = needs_clflush_before && + ((shmem_page_offset | page_length) + & (boot_cpu_data.x86_clflush_size - 1)); + + page_do_bit17_swizzling = obj_do_bit17_swizzling && + (page_to_phys(page) & (1 << 17)) != 0; + + ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); + if (ret == 0) + goto next_page; + + hit_slowpath = 1; + mutex_unlock(&dev->struct_mutex); + ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, + user_data, page_do_bit17_swizzling, + partial_cacheline_write, + needs_clflush_after); + + mutex_lock(&dev->struct_mutex); + +next_page: + set_page_dirty(page); + mark_page_accessed(page); + + if (ret) + goto out; + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +out: + i915_gem_object_unpin_pages(obj); + + if (hit_slowpath) { + /* + * Fixup: Flush cpu caches in case we didn't flush the dirty + * cachelines in-line while writing and the object moved + * out of the cpu write domain while we've dropped the lock. + */ + if (!needs_clflush_after && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + i915_gem_clflush_object(obj); + i915_gem_chipset_flush(dev); + } + } + + if (needs_clflush_after) + i915_gem_chipset_flush(dev); + + return ret; +} + +/** + * Writes data to the object referenced by handle. + * + * On error, the contents of the buffer that were to be modified are undefined. + */ +int +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_pwrite *args = data; + struct drm_i915_gem_object *obj; + int ret; + + if (args->size == 0) + return 0; + + if (!access_ok(VERIFY_READ, + to_user_ptr(args->data_ptr), + args->size)) + return -EFAULT; + + ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + args->size); + if (ret) + return -EFAULT; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Bounds check destination. */ + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { + ret = -EINVAL; + goto out; + } + + /* prime objects have no backing filp to GEM pread/pwrite + * pages from. + */ + if (!obj->base.filp) { + ret = -EINVAL; + goto out; + } + + trace_i915_gem_object_pwrite(obj, args->offset, args->size); + + ret = -EFAULT; + /* We can only do the GTT pwrite on untiled buffers, as otherwise + * it would end up going through the fenced access, and we'll get + * different detiling behavior between reading and writing. + * pread/pwrite currently are reading and writing from the CPU + * perspective, requiring manual detiling by the client. + */ + if (obj->phys_obj) { + ret = i915_gem_phys_pwrite(dev, obj, args, file); + goto out; + } + + if (obj->cache_level == I915_CACHE_NONE && + obj->tiling_mode == I915_TILING_NONE && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + /* Note that the gtt paths might fail with non-page-backed user + * pointers (e.g. gtt mappings when moving data between + * textures). Fallback to the shmem path in that case. 
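For illustration only (not part of the patch): a small sketch of the alignment test used to compute partial_cacheline_write above. OR-ing the offset and length and masking with (clflush_size - 1) is non-zero exactly when the write does not start and end on cacheline boundaries; the 64-byte cacheline size is just an example value.

	#include <stdio.h>

	/* Non-zero iff a write of 'len' bytes at 'off' does not cover whole cachelines. */
	static int is_partial_cacheline_write(unsigned off, unsigned len, unsigned clflush_size)
	{
		return (off | len) & (clflush_size - 1);
	}

	int main(void)
	{
		printf("%d\n", is_partial_cacheline_write(0, 256, 64));   /* 0: fully aligned */
		printf("%d\n", is_partial_cacheline_write(16, 256, 64));  /* non-zero: unaligned start */
		printf("%d\n", is_partial_cacheline_write(0, 100, 64));   /* non-zero: unaligned length */
		return 0;
	}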
*/ + } + + if (ret == -EFAULT || ret == -ENOSPC) + ret = i915_gem_shmem_pwrite(dev, obj, args, file); + +out: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_check_wedge(struct i915_gpu_error *error, + bool interruptible) +{ + if (i915_reset_in_progress(error)) { + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. */ + if (!interruptible) + return -EIO; + + /* Recovery complete, but the reset failed ... */ + if (i915_terminally_wedged(error)) + return -EIO; + + return -EAGAIN; + } + + return 0; +} + +/* + * Compare seqno against outstanding lazy request. Emit a request if they are + * equal. + */ +static int +i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +{ + int ret; + + BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + + ret = 0; + if (seqno == ring->outstanding_lazy_request) + ret = i915_add_request(ring, NULL, NULL); + + return ret; +} + +/** + * __wait_seqno - wait until execution of seqno has finished + * @ring: the ring expected to report seqno + * @seqno: duh! + * @reset_counter: reset sequence associated with the given seqno + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * + * Note: It is of utmost importance that the passed in seqno and reset_counter + * values have been read by the caller in an smp safe manner. Where read-side + * locks are involved, it is sufficient to read the reset_counter before + * unlocking the lock that protects the seqno. For lockless tricks, the + * reset_counter _must_ be read before, and an appropriate smp_rmb must be + * inserted. + * + * Returns 0 if the seqno was found within the alloted time. Else returns the + * errno with remaining time filled in timeout argument. + */ +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, + unsigned reset_counter, + bool interruptible, struct timespec *timeout) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct timespec before, now, wait_time={1,0}; + unsigned long timeout_jiffies; + long end; + bool wait_forever = true; + int ret; + + if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) + return 0; + + trace_i915_gem_request_wait_begin(ring, seqno); + + if (timeout != NULL) { + wait_time = *timeout; + wait_forever = false; + } + + timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); + + if (WARN_ON(!ring->irq_get(ring))) + return -ENODEV; + + /* Record current time in case interrupted by signal, or wedged * */ + getrawmonotonic(&before); + +#define EXIT_COND \ + (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ + i915_reset_in_progress(&dev_priv->gpu_error) || \ + reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + do { + if (interruptible) + end = wait_event_interruptible_timeout(ring->irq_queue, + EXIT_COND, + timeout_jiffies); + else + end = wait_event_timeout(ring->irq_queue, EXIT_COND, + timeout_jiffies); + + /* We need to check whether any gpu reset happened in between + * the caller grabbing the seqno and now ... */ + if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + end = -EAGAIN; + + /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely + * gone. 
*/ + ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); + if (ret) + end = ret; + } while (end == 0 && wait_forever); + + getrawmonotonic(&now); + + ring->irq_put(ring); + trace_i915_gem_request_wait_end(ring, seqno); +#undef EXIT_COND + + if (timeout) { + struct timespec sleep_time = timespec_sub(now, before); + *timeout = timespec_sub(*timeout, sleep_time); + if (!timespec_valid(timeout)) /* i.e. negative time remains */ + set_normalized_timespec(timeout, 0, 0); + } + + switch (end) { + case -EIO: + case -EAGAIN: /* Wedged */ + case -ERESTARTSYS: /* Signal */ + return (int)end; + case 0: /* Timeout */ + return -ETIME; + default: /* Completed */ + WARN_ON(end < 0); /* We're not aware of other errors */ + return 0; + } +} + +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ +int +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool interruptible = dev_priv->mm.interruptible; + int ret; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(seqno == 0); + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + return __wait_seqno(ring, seqno, + atomic_read(&dev_priv->gpu_error.reset_counter), + interruptible, NULL); +} + +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ +static __must_check int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, + bool readonly) +{ + struct intel_ring_buffer *ring = obj->ring; + u32 seqno; + int ret; + + seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; + if (seqno == 0) + return 0; + + ret = i915_wait_seqno(ring, seqno); + if (ret) + return ret; + + i915_gem_retire_requests_ring(ring); + + /* Manually manage the write flush as we may have not yet + * retired the buffer. + */ + if (obj->last_write_seqno && + i915_seqno_passed(seqno, obj->last_write_seqno)) { + obj->last_write_seqno = 0; + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + + return 0; +} + +/* A nonblocking variant of the above wait. This is a highly dangerous routine + * as the object state may change during this call. + */ +static __must_check int +i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, + bool readonly) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = obj->ring; + unsigned reset_counter; + u32 seqno; + int ret; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!dev_priv->mm.interruptible); + + seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; + if (seqno == 0) + return 0; + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); + mutex_unlock(&dev->struct_mutex); + ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); + mutex_lock(&dev->struct_mutex); + + i915_gem_retire_requests_ring(ring); + + /* Manually manage the write flush as we may have not yet + * retired the buffer. 
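For illustration only (not part of the patch): a user-space approximation of the remaining-timeout bookkeeping at the end of __wait_seqno above, which subtracts the time actually slept from the caller's budget and clamps a negative result to zero. clock_gettime(CLOCK_MONOTONIC) stands in for getrawmonotonic() here.

	#include <stdio.h>
	#include <time.h>

	/* a = a - b, clamped at zero (mirrors the timespec_sub + clamp above) */
	static void timespec_sub_clamp(struct timespec *a, const struct timespec *b)
	{
		a->tv_sec  -= b->tv_sec;
		a->tv_nsec -= b->tv_nsec;
		if (a->tv_nsec < 0) {
			a->tv_sec -= 1;
			a->tv_nsec += 1000000000L;
		}
		if (a->tv_sec < 0) {
			a->tv_sec = 0;
			a->tv_nsec = 0;
		}
	}

	int main(void)
	{
		struct timespec budget = { .tv_sec = 0, .tv_nsec = 500000000L }; /* 500 ms budget */
		struct timespec before, now, slept;

		clock_gettime(CLOCK_MONOTONIC, &before);
		/* ... wait for the event here ... */
		clock_gettime(CLOCK_MONOTONIC, &now);

		slept = now;
		timespec_sub_clamp(&slept, &before);
		timespec_sub_clamp(&budget, &slept);
		printf("remaining: %ld.%09ld s\n", (long)budget.tv_sec, budget.tv_nsec);
		return 0;
	}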
+ */ + if (ret == 0 && + obj->last_write_seqno && + i915_seqno_passed(seqno, obj->last_write_seqno)) { + obj->last_write_seqno = 0; + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + + return ret; +} + +/** + * Called when user space prepares to use an object with the CPU, either + * through the mmap ioctl's mapping or a GTT mapping. + */ +int +i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_set_domain *args = data; + struct drm_i915_gem_object *obj; + uint32_t read_domains = args->read_domains; + uint32_t write_domain = args->write_domain; + int ret; + + /* Only handle setting domains to types used by the CPU. */ + if (write_domain & I915_GEM_GPU_DOMAINS) + return -EINVAL; + + if (read_domains & I915_GEM_GPU_DOMAINS) + return -EINVAL; + + /* Having something in the write domain implies it's in the read + * domain, and only that read domain. Enforce that in the request. + */ + if (write_domain != 0 && read_domains != write_domain) + return -EINVAL; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Try to flush the object off the GPU without holding the lock. + * We will repeat the flush holding the lock in the normal manner + * to catch cases where we are gazumped. + */ + ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); + if (ret) + goto unref; + + if (read_domains & I915_GEM_DOMAIN_GTT) { + ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); + + /* Silently promote "you're not bound, there was nothing to do" + * to success, since the client was just asking us to + * make sure everything was done. + */ + if (ret == -EINVAL) + ret = 0; + } else { + ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); + } + +unref: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * Called when user space has done writes to this buffer + */ +int +i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_sw_finish *args = data; + struct drm_i915_gem_object *obj; + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Pinned buffers may be scanout, so flush the cache */ + if (obj->pin_count) + i915_gem_object_flush_cpu_write_domain(obj); + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * Maps the contents of an object, returning the address it is mapped + * into. + * + * While the mapping holds a reference on the contents of the object, it doesn't + * imply a ref on the object itself. + */ +int +i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_mmap *args = data; + struct drm_gem_object *obj; + unsigned long addr; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (obj == NULL) + return -ENOENT; + + /* prime objects have no backing filp to GEM mmap + * pages from. 
+ */ + if (!obj->filp) { + drm_gem_object_unreference_unlocked(obj); + return -EINVAL; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) + down_write(&current->mm->mmap_sem); + addr = do_mmap(obj->filp, 0, args->size, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); + up_write(&current->mm->mmap_sem); +#else + addr = vm_mmap(obj->filp, 0, args->size, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); +#endif + drm_gem_object_unreference_unlocked(obj); + if (IS_ERR((void *)addr)) + return addr; + + args->addr_ptr = (uint64_t) addr; + + return 0; +} + +/** + * i915_gem_fault - fault a page into the GTT + * vma: VMA in question + * vmf: fault info + * + * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped + * from userspace. The fault handler takes care of binding the object to + * the GTT (if needed), allocating and programming a fence register (again, + * only if needed based on whether the old reg is still valid or the object + * is tiled) and inserting a new PTE into the faulting process. + * + * Note that the faulting process may involve evicting existing objects + * from the GTT and/or fence registers to make room. So performance may + * suffer if the GTT working set is large or there are few fence registers + * left. + */ +int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; + pgoff_t page_offset; + unsigned long pfn; + int ret = 0; + bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto out; + + trace_i915_gem_object_fault(obj, page_offset, true, write); + + /* Access to snoopable pages through the GTT is incoherent. */ + if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { + ret = -EINVAL; + goto unlock; + } + + /* Now bind it into the GTT if needed */ + ret = i915_gem_object_pin(obj, 0, true, false); + if (ret) + goto unlock; + + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unpin; + + ret = i915_gem_object_get_fence(obj); + if (ret) + goto unpin; + + obj->fault_mappable = true; + + pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + + page_offset; + + /* Finally, remap it using the new GTT offset */ + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); +unpin: + i915_gem_object_unpin(obj); +unlock: + mutex_unlock(&dev->struct_mutex); +out: + switch (ret) { + case -EIO: + /* If this -EIO is due to a gpu hang, give the reset code a + * chance to clean up the mess. Otherwise return the proper + * SIGBUS. */ + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return VM_FAULT_SIGBUS; + case -EAGAIN: + /* Give the error handler a chance to run and move the + * objects off the GPU active list. Next time we service the + * fault, we should be able to transition the page into the + * GTT without touching the GPU (and so avoid further + * EIO/EGAIN). If the GPU is wedged, then there is no issue + * with coherency, just lost writes. + */ + set_need_resched(); + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job.
+ */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + case -ENOSPC: + return VM_FAULT_SIGBUS; + default: + WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); + return VM_FAULT_SIGBUS; + } +} + +/** + * i915_gem_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmapping with the DRM core code, but + * relinquish ownership of the pages back to the system. + * + * It is vital that we remove the page mapping if we have mapped a tiled + * object through the GTT and then lose the fence register due to + * resource pressure. Similarly if the object has been moved out of the + * aperture, than pages mapped into userspace must be revoked. Removing the + * mapping will then trigger a page fault on the next user access, allowing + * fixup by i915_gem_fault(). + */ +void +i915_gem_release_mmap(struct drm_i915_gem_object *obj) +{ + if (!obj->fault_mappable) + return; + + if (obj->base.dev->dev_mapping) + unmap_mapping_range(obj->base.dev->dev_mapping, + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, + obj->base.size, 1); + + obj->fault_mappable = false; +} + +uint32_t +i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) +{ + uint32_t gtt_size; + + if (INTEL_INFO(dev)->gen >= 4 || + tiling_mode == I915_TILING_NONE) + return size; + + /* Previous chips need a power-of-two fence region when tiling */ + if (INTEL_INFO(dev)->gen == 3) + gtt_size = 1024*1024; + else + gtt_size = 512*1024; + + while (gtt_size < size) + gtt_size <<= 1; + + return gtt_size; +} + +/** + * i915_gem_get_gtt_alignment - return required GTT alignment for an object + * @obj: object to check + * + * Return the required GTT alignment for an object, taking into account + * potential fence register mapping. + */ +uint32_t +i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, + int tiling_mode, bool fenced) +{ + /* + * Minimum alignment is 4k (GTT page size), but might be greater + * if a fence register is needed for the object. + */ + if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) || + tiling_mode == I915_TILING_NONE) + return 4096; + + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. + */ + return i915_gem_get_gtt_size(dev, size, tiling_mode); +} + +static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int ret; + + if (obj->base.map_list.map) + return 0; + + dev_priv->mm.shrinker_no_lock_stealing = true; + + ret = drm_gem_create_mmap_offset(&obj->base); + if (ret != -ENOSPC) + goto out; + + /* Badly fragmented mmap space? The only way we can recover + * space is by destroying unwanted objects. We can't randomly release + * mmap_offsets as userspace expects them to be persistent for the + * lifetime of the objects. The closest we can is to release the + * offsets on purgeable objects by truncating it and marking it purged, + * which prevents userspace from ever using that object again.
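For illustration only (not part of the patch): a standalone sketch of the fence-size rounding in i915_gem_get_gtt_size() above. Pre-gen4 chips need a power-of-two fence region, grown by doubling from a 512 KiB base (1 MiB on gen3) until it covers the object; the sizes passed in main() are example values.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the rounding above for pre-gen4, tiled objects. */
	static uint32_t fence_region_size(uint32_t size, int gen)
	{
		uint32_t gtt_size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

		while (gtt_size < size)
			gtt_size <<= 1;
		return gtt_size;
	}

	int main(void)
	{
		printf("%u\n", fence_region_size(600 * 1024, 2));  /* -> 1048576 */
		printf("%u\n", fence_region_size(3 << 20, 3));     /* -> 4194304 */
		return 0;
	}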
+ */ + i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); + ret = drm_gem_create_mmap_offset(&obj->base); + if (ret != -ENOSPC) + goto out; + + i915_gem_shrink_all(dev_priv); + ret = drm_gem_create_mmap_offset(&obj->base); +out: + dev_priv->mm.shrinker_no_lock_stealing = false; + + return ret; +} + +static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) +{ + if (!obj->base.map_list.map) + return; + + drm_gem_free_mmap_offset(&obj->base); +} + +int +i915_gem_mmap_gtt(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, + uint64_t *offset) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + if (obj->base.size > dev_priv->gtt.mappable_end) { + ret = -E2BIG; + goto out; + } + + if (obj->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to mmap a purgeable buffer\n"); + ret = -EINVAL; + goto out; + } + + ret = i915_gem_object_create_mmap_offset(obj); + if (ret) + goto out; + + *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; + +out: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing + * @dev: DRM device + * @data: GTT mapping ioctl data + * @file: GEM object info + * + * Simply returns the fake offset to userspace so it can mmap it. + * The mmap call will end up in drm_gem_mmap(), which will set things + * up so we can get faults in the handler above. + * + * The fault handler will take care of binding the object into the GTT + * (since it may have been evicted to make room for something), allocating + * a fence register, and mapping the appropriate aperture address into + * userspace. + */ +int +i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_mmap_gtt *args = data; + + return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); +} + +/* Immediately discard the backing storage */ +static void +i915_gem_object_truncate(struct drm_i915_gem_object *obj) +{ + struct inode *inode; + + i915_gem_object_free_mmap_offset(obj); + + if (obj->base.filp == NULL) + return; + + /* Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. + * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*. + */ + inode = file_inode(obj->base.filp); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) + shmem_truncate_range(inode, 0, (loff_t)-1); +#else + truncate_inode_pages(inode->i_mapping, 0); + if (inode->i_op->truncate_range) + inode->i_op->truncate_range(inode, 0, (loff_t)-1); +#endif + + obj->madv = __I915_MADV_PURGED; +} + +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) +{ + return obj->madv == I915_MADV_DONTNEED; +} + +static void +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) +{ + struct sg_page_iter sg_iter; + int ret; + + BUG_ON(obj->madv == __I915_MADV_PURGED); + + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. 
+ */ + WARN_ON(ret != -EIO); + i915_gem_clflush_object(obj); + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_save_bit_17_swizzle(obj); + + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; + + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { + struct page *page = sg_page_iter_page(&sg_iter); + + if (obj->dirty) + set_page_dirty(page); + + if (obj->madv == I915_MADV_WILLNEED) + mark_page_accessed(page); + + page_cache_release(page); + } + obj->dirty = 0; + + sg_free_table(obj->pages); + kfree(obj->pages); +} + +int +i915_gem_object_put_pages(struct drm_i915_gem_object *obj) +{ + const struct drm_i915_gem_object_ops *ops = obj->ops; + + if (obj->pages == NULL) + return 0; + + BUG_ON(obj->gtt_space); + + if (obj->pages_pin_count) + return -EBUSY; + + /* ->put_pages might need to allocate memory for the bit17 swizzle + * array, hence protect them from being reaped by removing them from gtt + * lists early. */ + list_del(&obj->gtt_list); + + ops->put_pages(obj); + obj->pages = NULL; + + if (i915_gem_object_is_purgeable(obj)) + i915_gem_object_truncate(obj); + + return 0; +} + +static long +__i915_gem_shrink(struct drm_i915_private *dev_priv, long target, + bool purgeable_only) +{ + struct drm_i915_gem_object *obj, *next; + long count = 0; + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.unbound_list, + gtt_list) { + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && + i915_gem_object_put_pages(obj) == 0) { + count += obj->base.size >> PAGE_SHIFT; + if (count >= target) + return count; + } + } + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && + i915_gem_object_unbind(obj) == 0 && + i915_gem_object_put_pages(obj) == 0) { + count += obj->base.size >> PAGE_SHIFT; + if (count >= target) + return count; + } + } + + return count; +} + +static long +i915_gem_purge(struct drm_i915_private *dev_priv, long target) +{ + return __i915_gem_shrink(dev_priv, target, true); +} + +static void +i915_gem_shrink_all(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj, *next; + + i915_gem_evict_everything(dev_priv->dev); + + list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) + i915_gem_object_put_pages(obj); +} + +static int +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int page_count, i; + struct address_space *mapping; + struct sg_table *st; + struct scatterlist *sg; + struct sg_page_iter sg_iter; + struct page *page; + unsigned long last_pfn = 0; /* suppress gcc warning */ + gfp_t gfp; + + /* Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return -ENOMEM; + + page_count = obj->base.size / PAGE_SIZE; + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + sg_free_table(st); + kfree(st); + return -ENOMEM; + } + + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. 
+ * + * Fail silently without starting the shrinker + */ + mapping = file_inode(obj->base.filp)->i_mapping; + gfp = mapping_gfp_mask(mapping); + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; + gfp &= ~(__GFP_IO | __GFP_WAIT); + sg = st->sgl; + st->nents = 0; + for (i = 0; i < page_count; i++) { + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) { + i915_gem_purge(dev_priv, page_count); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + } + if (IS_ERR(page)) { + /* We've tried hard to allocate the memory by reaping + * our own buffer, now let the real VM do its job and + * go down in flames if truly OOM. + */ + gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); + gfp |= __GFP_IO | __GFP_WAIT; + + i915_gem_shrink_all(dev_priv); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) + goto err_pages; + + gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; + gfp &= ~(__GFP_IO | __GFP_WAIT); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + st->nents++; + sg_set_page(sg, page, PAGE_SIZE, 0); + sg = sg_next(sg); + continue; + } +#endif +#endif + if (!i || page_to_pfn(page) != last_pfn + 1) { + if (i) + sg = sg_next(sg); + st->nents++; + sg_set_page(sg, page, PAGE_SIZE, 0); + } else { + sg->length += PAGE_SIZE; + } + last_pfn = page_to_pfn(page); + } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#ifdef CONFIG_SWIOTLB + if (!swiotlb_nr_tbl()) +#endif +#endif + sg_mark_end(sg); + obj->pages = st; + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_do_bit_17_swizzle(obj); + + return 0; + +err_pages: + sg_mark_end(sg); + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + page_cache_release(sg_page_iter_page(&sg_iter)); + sg_free_table(st); + kfree(st); + return PTR_ERR(page); +} + +/* Ensure that the associated pages are gathered from the backing storage + * and pinned into our object. i915_gem_object_get_pages() may be called + * multiple times before they are released by a single call to + * i915_gem_object_put_pages() - once the pages are no longer referenced + * either as a result of memory pressure (reaping pages under the shrinker) + * or as the object is itself released. + */ +int +i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + const struct drm_i915_gem_object_ops *ops = obj->ops; + int ret; + + if (obj->pages) + return 0; + + if (obj->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to obtain a purgeable object\n"); + return -EINVAL; + } + + BUG_ON(obj->pages_pin_count); + + ret = ops->get_pages(obj); + if (ret) + return ret; + + list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + return 0; +} + +void +i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 seqno = intel_ring_get_seqno(ring); + + BUG_ON(ring == NULL); + if (obj->ring != ring && obj->last_write_seqno) { + /* Keep the seqno relative to the current ring */ + obj->last_write_seqno = seqno; + } + obj->ring = ring; + + /* Add a reference if we're newly entering the active list. */ + if (!obj->active) { + drm_gem_object_reference(&obj->base); + obj->active = 1; + } + + /* Move from whatever list we were on to the tail of execution. 
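For illustration only (not part of the patch): a small sketch of the coalescing done while building the sg_table in i915_gem_object_get_pages_gtt() above. Physically contiguous pages are merged into one scatterlist segment, and a new segment is started whenever a page-frame number is not the successor of the previous one; the pfn array here is made up for the example.

	#include <stdio.h>

	/* Count the scatterlist segments needed for a run of page-frame numbers,
	 * merging pages whose pfn follows the previous one (as the loop above does). */
	static int count_sg_segments(const unsigned long *pfns, int count)
	{
		int i, segments = 0;
		unsigned long last_pfn = 0;

		for (i = 0; i < count; i++) {
			if (!i || pfns[i] != last_pfn + 1)
				segments++;	/* start a new segment */
			/* else: extend the current segment by one page */
			last_pfn = pfns[i];
		}
		return segments;
	}

	int main(void)
	{
		unsigned long pfns[] = { 100, 101, 102, 200, 201, 300 }; /* example layout */

		printf("segments: %d\n", count_sg_segments(pfns, 6));	/* -> 3 */
		return 0;
	}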
*/ + list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj->ring_list, &ring->active_list); + + obj->last_read_seqno = seqno; + + if (obj->fenced_gpu_access) { + obj->last_fenced_seqno = seqno; + + /* Bump MRU to take account of the delayed flush */ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_fence_reg *reg; + + reg = &dev_priv->fence_regs[obj->fence_reg]; + list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + } + } +} + +static void +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); + BUG_ON(!obj->active); + + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + list_del_init(&obj->ring_list); + obj->ring = NULL; + + obj->last_read_seqno = 0; + obj->last_write_seqno = 0; + obj->base.write_domain = 0; + + obj->last_fenced_seqno = 0; + obj->fenced_gpu_access = false; + + obj->active = 0; + drm_gem_object_unreference(&obj->base); + + WARN_ON(i915_verify_lists(dev)); +} + +static int +i915_gem_init_seqno(struct drm_device *dev, u32 seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int ret, i, j; + + /* Carefully retire all requests without writing to the rings */ + for_each_ring(ring, dev_priv, i) { + ret = intel_ring_idle(ring); + if (ret) + return ret; + } + i915_gem_retire_requests(dev); + + /* Finally reset hw state */ + for_each_ring(ring, dev_priv, i) { + intel_ring_init_seqno(ring, seqno); + + for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) + ring->sync_seqno[j] = 0; + } + + return 0; +} + +int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (seqno == 0) + return -EINVAL; + + /* HWS page needs to be set less than what we + * will inject to ring + */ + ret = i915_gem_init_seqno(dev, seqno - 1); + if (ret) + return ret; + + /* Carefully set the last_seqno value so that wrap + * detection still works + */ + dev_priv->next_seqno = seqno; + dev_priv->last_seqno = seqno - 1; + if (dev_priv->last_seqno == 0) + dev_priv->last_seqno--; + + return 0; +} + +int +i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* reserve 0 for non-seqno */ + if (dev_priv->next_seqno == 0) { + int ret = i915_gem_init_seqno(dev, 0); + if (ret) + return ret; + + dev_priv->next_seqno = 1; + } + + *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; + return 0; +} + +int +i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + u32 *out_seqno) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_request *request; + u32 request_ring_position; + int was_empty; + int ret; + + /* + * Emit any outstanding flushes - execbuf can fail to emit the flush + * after having emitted the batchbuffer command. Hence we need to fix + * things up similar to emitting the lazy request. The difference here + * is that the flush _must_ happen before the next request, no matter + * what. 
+ */ + ret = intel_ring_flush_all_caches(ring); + if (ret) + return ret; + + request = kmalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + + /* Record the position of the start of the request so that + * should we detect the updated seqno part-way through the + * GPU processing the request, we never over-estimate the + * position of the head. + */ + request_ring_position = intel_ring_get_tail(ring); + + ret = ring->add_request(ring); + if (ret) { + kfree(request); + return ret; + } + + request->seqno = intel_ring_get_seqno(ring); + request->ring = ring; + request->tail = request_ring_position; + request->emitted_jiffies = jiffies; + was_empty = list_empty(&ring->request_list); + list_add_tail(&request->list, &ring->request_list); + request->file_priv = NULL; + + if (file) { + struct drm_i915_file_private *file_priv = file->driver_priv; + + spin_lock(&file_priv->mm.lock); + request->file_priv = file_priv; + list_add_tail(&request->client_list, + &file_priv->mm.request_list); + spin_unlock(&file_priv->mm.lock); + } + + trace_i915_gem_request_add(ring, request->seqno); + ring->outstanding_lazy_request = 0; + + if (!dev_priv->mm.suspended) { + if (i915_enable_hangcheck) { + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + } + if (was_empty) { + queue_delayed_work(dev_priv->wq, + &dev_priv->mm.retire_work, + round_jiffies_up_relative(HZ)); + intel_mark_busy(dev_priv->dev); + } + } + + if (out_seqno) + *out_seqno = request->seqno; + return 0; +} + +static inline void +i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) +{ + struct drm_i915_file_private *file_priv = request->file_priv; + + if (!file_priv) + return; + + spin_lock(&file_priv->mm.lock); + if (request->file_priv) { + list_del(&request->client_list); + request->file_priv = NULL; + } + spin_unlock(&file_priv->mm.lock); +} + +static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + while (!list_empty(&ring->request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&ring->request_list, + struct drm_i915_gem_request, + list); + + list_del(&request->list); + i915_gem_request_remove_from_client(request); + kfree(request); + } + + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + i915_gem_object_move_to_inactive(obj); + } +} + +void i915_gem_restore_fences(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + + /* + * Commit delayed tiling changes if we have an object still + * attached to the fence, otherwise just clear the fence. + */ + if (reg->obj) { + i915_gem_object_update_fence(reg->obj, reg, + reg->obj->tiling_mode); + } else { + i915_gem_write_fence(dev, i, NULL); + } + } +} + +void i915_gem_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct intel_ring_buffer *ring; + int i; + + for_each_ring(ring, dev_priv, i) + i915_gem_reset_ring_lists(dev_priv, ring); + + /* Move everything out of the GPU domains to ensure we do any + * necessary invalidation upon reuse. 
+ */ + list_for_each_entry(obj, + &dev_priv->mm.inactive_list, + mm_list) + { + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + } + + i915_gem_restore_fences(dev); +} + +/** + * This function clears the request list as sequence numbers are passed. + */ +void +i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) +{ + uint32_t seqno; + + if (list_empty(&ring->request_list)) + return; + + WARN_ON(i915_verify_lists(ring->dev)); + + seqno = ring->get_seqno(ring, true); + + while (!list_empty(&ring->request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&ring->request_list, + struct drm_i915_gem_request, + list); + + if (!i915_seqno_passed(seqno, request->seqno)) + break; + + trace_i915_gem_request_retire(ring, request->seqno); + /* We know the GPU must have read the request to have + * sent us the seqno + interrupt, so use the position + * of tail of the request to update the last known position + * of the GPU head. + */ + ring->last_retired_head = request->tail; + + list_del(&request->list); + i915_gem_request_remove_from_client(request); + kfree(request); + } + + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate. + */ + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_seqno_passed(seqno, obj->last_read_seqno)) + break; + + i915_gem_object_move_to_inactive(obj); + } + + if (unlikely(ring->trace_irq_seqno && + i915_seqno_passed(seqno, ring->trace_irq_seqno))) { + ring->irq_put(ring); + ring->trace_irq_seqno = 0; + } + + WARN_ON(i915_verify_lists(ring->dev)); +} + +void +i915_gem_retire_requests(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; + + for_each_ring(ring, dev_priv, i) + i915_gem_retire_requests_ring(ring); +} + +static void +i915_gem_retire_work_handler(struct work_struct *work) +{ + drm_i915_private_t *dev_priv; + struct drm_device *dev; + struct intel_ring_buffer *ring; + bool idle; + int i; + + dev_priv = container_of(work, drm_i915_private_t, + mm.retire_work.work); + dev = dev_priv->dev; + + /* Come back later if the device is busy... */ + if (!mutex_trylock(&dev->struct_mutex)) { + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, + round_jiffies_up_relative(HZ)); + return; + } + + i915_gem_retire_requests(dev); + + /* Send a periodic flush down the ring so we don't hold onto GEM + * objects indefinitely. + */ + idle = true; + for_each_ring(ring, dev_priv, i) { + if (ring->gpu_caches_dirty) + i915_add_request(ring, NULL, NULL); + + idle &= list_empty(&ring->request_list); + } + + if (!dev_priv->mm.suspended && !idle) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, + round_jiffies_up_relative(HZ)); + if (idle) + intel_mark_idle(dev); + + mutex_unlock(&dev->struct_mutex); +} + +/** + * Ensures that an object will eventually get non-busy by flushing any required + * write domains, emitting any outstanding lazy request and retiring and + * completed requests. 
+ */ +static int +i915_gem_object_flush_active(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->active) { + ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); + if (ret) + return ret; + + i915_gem_retire_requests_ring(obj->ring); + } + + return 0; +} + +/** + * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT + * @DRM_IOCTL_ARGS: standard ioctl arguments + * + * Returns 0 if successful, else an error is returned with the remaining time in + * the timeout parameter. + * -ETIME: object is still busy after timeout + * -ERESTARTSYS: signal interrupted the wait + * -ENONENT: object doesn't exist + * Also possible, but rare: + * -EAGAIN: GPU wedged + * -ENOMEM: damn + * -ENODEV: Internal IRQ fail + * -E?: The add request failed + * + * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any + * non-zero timeout parameter the wait ioctl will wait for the given number of + * nanoseconds on an object becoming unbusy. Since the wait itself does so + * without holding struct_mutex the object may become re-busied before this + * function completes. A similar but shorter * race condition exists in the busy + * ioctl + */ +int +i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_wait *args = data; + struct drm_i915_gem_object *obj; + struct intel_ring_buffer *ring = NULL; + struct timespec timeout_stack, *timeout = NULL; + unsigned reset_counter; + u32 seqno = 0; + int ret = 0; + + if (args->timeout_ns >= 0) { + timeout_stack = ns_to_timespec(args->timeout_ns); + timeout = &timeout_stack; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); + if (&obj->base == NULL) { + mutex_unlock(&dev->struct_mutex); + return -ENOENT; + } + + /* Need to make sure the object gets inactive eventually. */ + ret = i915_gem_object_flush_active(obj); + if (ret) + goto out; + + if (obj->active) { + seqno = obj->last_read_seqno; + ring = obj->ring; + } + + if (seqno == 0) + goto out; + + /* Do this after OLR check to make sure we make forward progress polling + * on this IOCTL with a 0 timeout (like busy ioctl) + */ + if (!args->timeout_ns) { + ret = -ETIME; + goto out; + } + + drm_gem_object_unreference(&obj->base); + reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); + mutex_unlock(&dev->struct_mutex); + + ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); + if (timeout) + args->timeout_ns = timespec_to_ns(timeout); + return ret; + +out: + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * i915_gem_object_sync - sync an object to a ring. + * + * @obj: object which may be in use on another ring. + * @to: ring we wish to use the object on. May be NULL. + * + * This code is meant to abstract object synchronization with the GPU. + * Calling with NULL implies synchronizing the object with the CPU + * rather than a particular GPU ring. + * + * Returns 0 if successful, else propagates up the lower layer error. 
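A hypothetical userspace counterpart to the wait ioctl documented above: wait a bounded time for a buffer to go idle and distinguish the timeout case. It assumes libdrm's drmIoctl() and the uapi definitions from i915_drm.h (include paths vary by distribution); fd and handle are placeholders.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

/* Wait up to 10ms for 'handle' to become idle on 'fd', an open DRM node. */
static int wait_bo_idle(int fd, uint32_t handle)
{
        struct drm_i915_gem_wait wait;

        memset(&wait, 0, sizeof(wait));
        wait.bo_handle = handle;
        wait.timeout_ns = 10 * 1000 * 1000;     /* 10ms budget */

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
                return 0;                       /* object is idle */

        if (errno == ETIME)
                printf("still busy; %lld ns of the budget remain\n",
                       (long long)wait.timeout_ns);
        return -errno;
}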
+ */ +int +i915_gem_object_sync(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) +{ + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) + return i915_gem_object_wait_rendering(obj, false); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_read_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + ret = i915_gem_check_olr(obj->ring, seqno); + if (ret) + return ret; + + ret = to->sync_to(to, from, seqno); + if (!ret) + /* We use last_read_seqno because sync_to() + * might have just caused seqno wrap under + * the radar. + */ + from->sync_seqno[idx] = obj->last_read_seqno; + + return ret; +} + +static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) +{ + u32 old_write_domain, old_read_domains; + + /* Force a pagefault for domain tracking on next user access */ + i915_gem_release_mmap(obj); + + if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) + return; + + /* Wait for any direct GTT access to complete */ + mb(); + + old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + + obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; + obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); +} + +/** + * Unbinds an object from the GTT aperture. + */ +int +i915_gem_object_unbind(struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + int ret; + + if (obj->gtt_space == NULL) + return 0; + + if (obj->pin_count) + return -EBUSY; + + BUG_ON(obj->pages == NULL); + + ret = i915_gem_object_finish_gpu(obj); + if (ret) + return ret; + /* Continue on if we fail due to EIO, the GPU is hung so we + * should be safe and we need to cleanup or else we might + * cause memory corruption through use-after-free. + */ + + i915_gem_object_finish_gtt(obj); + + /* release the fence reg _after_ flushing */ + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + + trace_i915_gem_object_unbind(obj); + + if (obj->has_global_gtt_mapping) + i915_gem_gtt_unbind_object(obj); + if (obj->has_aliasing_ppgtt_mapping) { + i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); + obj->has_aliasing_ppgtt_mapping = 0; + } + i915_gem_gtt_finish_object(obj); + + list_del(&obj->mm_list); + list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + /* Avoid an unnecessary call to unbind on rebind. */ + obj->map_and_fenceable = true; + + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; + obj->gtt_offset = 0; + + return 0; +} + +int i915_gpu_idle(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int ret, i; + + /* Flush everything onto the inactive list. 
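The sync_seqno[] bookkeeping used by i915_gem_object_sync() above needs a stable array slot for every other ring. Below is a standalone sketch of one way such an index can be derived, with each ring reserving one slot per other ring; it mirrors what a helper like intel_ring_sync_index() is expected to compute, written here as illustrative userspace C with made-up ring numbers.

#include <stdio.h>

#define NUM_RINGS 3     /* render, bsd and blt in this era of the driver */

/* Slot in one ring's (NUM_RINGS - 1) entry bookkeeping array for another
 * ring: 0 .. NUM_RINGS-2, skipping the ring itself. */
static int sync_index(int ring, int other)
{
        int idx = other - ring - 1;

        if (idx < 0)
                idx += NUM_RINGS;
        return idx;
}

int main(void)
{
        printf("render tracks bsd in slot %d\n", sync_index(0, 1)); /* 0 */
        printf("render tracks blt in slot %d\n", sync_index(0, 2)); /* 1 */
        printf("blt tracks render in slot %d\n", sync_index(2, 0)); /* 0 */
        return 0;
}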
*/ + for_each_ring(ring, dev_priv, i) { + ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); + if (ret) + return ret; + + ret = intel_ring_idle(ring); + if (ret) + return ret; + } + + return 0; +} + +static void i965_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int fence_reg; + int fence_pitch_shift; + + if (INTEL_INFO(dev)->gen >= 6) { + fence_reg = FENCE_REG_SANDYBRIDGE_0; + fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; + } else { + fence_reg = FENCE_REG_965_0; + fence_pitch_shift = I965_FENCE_PITCH_SHIFT; + } + + fence_reg += reg * 8; + + /* To w/a incoherency with non-atomic 64-bit register updates, + * we split the 64-bit update into two 32-bit writes. In order + * for a partial fence not to be evaluated between writes, we + * precede the update with write to turn off the fence register, + * and only enable the fence as the last step. + * + * For extra levels of paranoia, we make sure each step lands + * before applying the next step. + */ + I915_WRITE(fence_reg, 0); + POSTING_READ(fence_reg); + + if (obj) { + u32 size = obj->gtt_space->size; + uint64_t val; + + val = (uint64_t)((obj->gtt_offset + size - 4096) & + 0xfffff000) << 32; + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; + + I915_WRITE(fence_reg + 4, val >> 32); + POSTING_READ(fence_reg + 4); + + I915_WRITE(fence_reg + 0, val); + POSTING_READ(fence_reg); + } else { + I915_WRITE(fence_reg + 4, 0); + POSTING_READ(fence_reg + 4); + } +} + +static void i915_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 val; + + if (obj) { + u32 size = obj->gtt_space->size; + int pitch_val; + int tile_width; + + WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x [fenceable? 
%d] not 1M or pot-size (0x%08x) aligned\n", + obj->gtt_offset, obj->map_and_fenceable, size); + + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + tile_width = 128; + else + tile_width = 512; + + /* Note: pitch better be a power of two tile widths */ + pitch_val = obj->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; + + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I915_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + } else + val = 0; + + if (reg < 8) + reg = FENCE_REG_830_0 + reg * 4; + else + reg = FENCE_REG_945_8 + (reg - 8) * 4; + + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +static void i830_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t val; + + if (obj) { + u32 size = obj->gtt_space->size; + uint32_t pitch_val; + + WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x not 512K or pot-size 0x%08x aligned\n", + obj->gtt_offset, size); + + pitch_val = obj->stride / 128; + pitch_val = ffs(pitch_val) - 1; + + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I830_FENCE_SIZE_BITS(size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + } else + val = 0; + + I915_WRITE(FENCE_REG_830_0 + reg * 4, val); + POSTING_READ(FENCE_REG_830_0 + reg * 4); +} + +inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) +{ + return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; +} + +static void i915_gem_write_fence(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Ensure that all CPU reads are completed before installing a fence + * and all writes before removing the fence. + */ + if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) + mb(); + + WARN(obj && (!obj->stride || !obj->tiling_mode), + "bogus fence setup with stride: 0x%x, tiling mode: %i\n", + obj->stride, obj->tiling_mode); + + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + case 5: + case 4: i965_write_fence_reg(dev, reg, obj); break; + case 3: i915_write_fence_reg(dev, reg, obj); break; + case 2: i830_write_fence_reg(dev, reg, obj); break; + default: BUG(); + } + + /* And similarly be paranoid that no direct access to this region + * is reordered to before the fence is installed. + */ + if (i915_gem_object_needs_mb(obj)) + mb(); +} + +static inline int fence_number(struct drm_i915_private *dev_priv, + struct drm_i915_fence_reg *fence) +{ + return fence - dev_priv->fence_regs; +} + +static void i915_gem_write_fence__ipi(void *data) +{ + wbinvd(); +} + +static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, + struct drm_i915_fence_reg *fence, + bool enable) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int fence_reg = fence_number(dev_priv, fence); + + /* In order to fully serialize access to the fenced region and + * the update to the fence register we need to take extreme + * measures on SNB+. In theory, the write to the fence register + * flushes all memory transactions before, and coupled with the + * mb() placed around the register write we serialise all memory + * operations with respect to the changes in the tiler. 
Yet, on + * SNB+ we need to take a step further and emit an explicit wbinvd() + * on each processor in order to manually flush all memory + * transactions before updating the fence register. + */ + if (HAS_LLC(obj->base.dev)) + on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); + i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL); + + if (enable) { + obj->fence_reg = fence_reg; + fence->obj = obj; + list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); + } else { + obj->fence_reg = I915_FENCE_REG_NONE; + fence->obj = NULL; + list_del_init(&fence->lru_list); + } + obj->fence_dirty = false; +} + +static int +i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) +{ + if (obj->last_fenced_seqno) { + int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); + if (ret) + return ret; + + obj->last_fenced_seqno = 0; + } + + obj->fenced_gpu_access = false; + return 0; +} + +int +i915_gem_object_put_fence(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_fence_reg *fence; + int ret; + + ret = i915_gem_object_wait_fence(obj); + if (ret) + return ret; + + if (obj->fence_reg == I915_FENCE_REG_NONE) + return 0; + + fence = &dev_priv->fence_regs[obj->fence_reg]; + + i915_gem_object_fence_lost(obj); + i915_gem_object_update_fence(obj, fence, false); + + return 0; +} + +static struct drm_i915_fence_reg * +i915_find_fence_reg(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_fence_reg *reg, *avail; + int i; + + /* First try to find a free reg */ + avail = NULL; + for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { + reg = &dev_priv->fence_regs[i]; + if (!reg->obj) + return reg; + + if (!reg->pin_count) + avail = reg; + } + + if (avail == NULL) + return NULL; + + /* None available, try to steal one or wait for a user to finish */ + list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { + if (reg->pin_count) + continue; + + return reg; + } + + return NULL; +} + +/** + * i915_gem_object_get_fence - set up fencing for an object + * @obj: object to map through a fence reg + * + * When mapping objects through the GTT, userspace wants to be able to write + * to them without having to worry about swizzling if the object is tiled. + * This function walks the fence regs looking for a free one for @obj, + * stealing one if it can't find any. + * + * It then sets up the reg based on the object's properties: address, pitch + * and tiling format. + * + * For an untiled surface, this removes any existing fence. + */ +int +i915_gem_object_get_fence(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool enable = obj->tiling_mode != I915_TILING_NONE; + struct drm_i915_fence_reg *reg; + int ret; + + /* Have we updated the tiling parameters upon the object and so + * will need to serialise the write to the associated fence register? + */ + if (obj->fence_dirty) { + ret = i915_gem_object_wait_fence(obj); + if (ret) + return ret; + } + + /* Just update our place in the LRU if our fence is getting reused. 
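As an aside on the fence setup in i915_write_fence_reg() and i830_write_fence_reg() above: the pitch field stores the base-2 log of the pitch in tiles, which is why a power-of-two pitch can be encoded with ffs(). A tiny standalone check of that identity, using hypothetical stride and tile-width values:

#include <assert.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int stride = 4096;     /* bytes per row, hypothetical */
        unsigned int tile_width = 512;  /* X-tile width used above */
        unsigned int tiles_per_row = stride / tile_width;       /* 8 */

        /* ffs(x) - 1 == log2(x) when x is a power of two. */
        assert(ffs(tiles_per_row) - 1 == 3);
        return 0;
}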
*/ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + reg = &dev_priv->fence_regs[obj->fence_reg]; + if (!obj->fence_dirty) { + list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + return 0; + } + } else if (enable) { + reg = i915_find_fence_reg(dev); + if (reg == NULL) + return -EDEADLK; + + if (reg->obj) { + struct drm_i915_gem_object *old = reg->obj; + + ret = i915_gem_object_wait_fence(old); + if (ret) + return ret; + + i915_gem_object_fence_lost(old); + } + } else + return 0; + + i915_gem_object_update_fence(obj, reg, enable); + + return 0; +} + +static bool i915_gem_valid_gtt_space(struct drm_device *dev, + struct drm_mm_node *gtt_space, + unsigned long cache_level) +{ + struct drm_mm_node *other; + + /* On non-LLC machines we have to be careful when putting differing + * types of snoopable memory together to avoid the prefetcher + * crossing memory domains and dying. + */ + if (HAS_LLC(dev)) + return true; + + if (gtt_space == NULL) + return true; + + if (list_empty(>t_space->node_list)) + return true; + + other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); + if (other->allocated && !other->hole_follows && other->color != cache_level) + return false; + + other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); + if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) + return false; + + return true; +} + +static void i915_gem_verify_gtt(struct drm_device *dev) +{ +#if WATCH_GTT + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + int err = 0; + + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + if (obj->gtt_space == NULL) { + printk(KERN_ERR "object found on GTT list with no space reserved\n"); + err++; + continue; + } + + if (obj->cache_level != obj->gtt_space->color) { + printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", + obj->gtt_space->start, + obj->gtt_space->start + obj->gtt_space->size, + obj->cache_level, + obj->gtt_space->color); + err++; + continue; + } + + if (!i915_gem_valid_gtt_space(dev, + obj->gtt_space, + obj->cache_level)) { + printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", + obj->gtt_space->start, + obj->gtt_space->start + obj->gtt_space->size, + obj->cache_level); + err++; + continue; + } + } + + WARN_ON(err); +#endif +} + +/** + * Finds free space in the GTT aperture and binds the object there. + */ +static int +i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking) +{ + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_mm_node *node; + u32 size, fence_size, fence_alignment, unfenced_alignment; + bool mappable, fenceable; + int ret; + + fence_size = i915_gem_get_gtt_size(dev, + obj->base.size, + obj->tiling_mode); + fence_alignment = i915_gem_get_gtt_alignment(dev, + obj->base.size, + obj->tiling_mode, true); + unfenced_alignment = + i915_gem_get_gtt_alignment(dev, + obj->base.size, + obj->tiling_mode, false); + + if (alignment == 0) + alignment = map_and_fenceable ? fence_alignment : + unfenced_alignment; + if (map_and_fenceable && alignment & (fence_alignment - 1)) { + DRM_ERROR("Invalid object alignment requested %u\n", alignment); + return -EINVAL; + } + + size = map_and_fenceable ? 
fence_size : obj->base.size; + + /* If the object is bigger than the entire aperture, reject it early + * before evicting everything in a vain attempt to find space. + */ + if (obj->base.size > + (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) { + DRM_ERROR("Attempting to bind an object larger than the aperture\n"); + return -E2BIG; + } + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (node == NULL) { + i915_gem_object_unpin_pages(obj); + return -ENOMEM; + } + + search_free: + if (map_and_fenceable) + ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, + size, alignment, obj->cache_level, + 0, dev_priv->gtt.mappable_end); + else + ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, + size, alignment, obj->cache_level); + if (ret) { + ret = i915_gem_evict_something(dev, size, alignment, + obj->cache_level, + map_and_fenceable, + nonblocking); + if (ret == 0) + goto search_free; + + i915_gem_object_unpin_pages(obj); + kfree(node); + return ret; + } + if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { + i915_gem_object_unpin_pages(obj); + drm_mm_put_block(node); + return -EINVAL; + } + + ret = i915_gem_gtt_prepare_object(obj); + if (ret) { + i915_gem_object_unpin_pages(obj); + drm_mm_put_block(node); + return ret; + } + + list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + obj->gtt_space = node; + obj->gtt_offset = node->start; + + fenceable = + node->size == fence_size && + (node->start & (fence_alignment - 1)) == 0; + + mappable = + obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; + + obj->map_and_fenceable = mappable && fenceable; + + i915_gem_object_unpin_pages(obj); + trace_i915_gem_object_bind(obj, map_and_fenceable); + i915_gem_verify_gtt(dev); + return 0; +} + +void +i915_gem_clflush_object(struct drm_i915_gem_object *obj) +{ + /* If we don't have a page list set up, then we're not pinned + * to GPU, and we can ignore the cache flush because it'll happen + * again at bind time. + */ + if (obj->pages == NULL) + return; + + /* + * Stolen memory is always coherent with the GPU as it is explicitly + * marked as wc by the system, or the system is cache-coherent. + */ + if (obj->stolen) + return; + + /* If the GPU is snooping the contents of the CPU cache, + * we do not need to manually clear the CPU cache lines. However, + * the caches are only snooped when the render cache is + * flushed/invalidated. As we always have to emit invalidations + * and flushes when moving into and out of the RENDER domain, correct + * snooping behaviour occurs naturally as the result of our domain + * tracking. + */ + if (obj->cache_level != I915_CACHE_NONE) + return; + + trace_i915_gem_object_clflush(obj); + + drm_clflush_sg(obj->pages); +} + +/** Flushes the GTT write domain for the object if it's dirty. */ +static void +i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) +{ + uint32_t old_write_domain; + + if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) + return; + + /* No actual flushing is required for the GTT write domain. Writes + * to it immediately go to main memory as far as we know, so there's + * no chipset flush. It also doesn't land in render cache. 
+ * + * However, we do have to enforce the order so that all writes through + * the GTT land before any writes to the device, such as updates to + * the GATT itself. + */ + wmb(); + + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + old_write_domain); +} + +/** Flushes the CPU write domain for the object if it's dirty. */ +static void +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) +{ + uint32_t old_write_domain; + + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) + return; + + i915_gem_clflush_object(obj); + i915_gem_chipset_flush(obj->base.dev); + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + old_write_domain); +} + +/** + * Moves a single object to the GTT read, and possibly write domain. + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) +{ + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; + uint32_t old_write_domain, old_read_domains; + int ret; + + /* Not valid to be called on unbound objects. */ + if (obj->gtt_space == NULL) + return -EINVAL; + + if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) + return 0; + + ret = i915_gem_object_wait_rendering(obj, !write); + if (ret) + return ret; + + i915_gem_object_flush_cpu_write_domain(obj); + + /* Serialise direct access to this object with the barriers for + * coherent writes from the GPU, by effectively invalidating the + * GTT domain upon first access. + */ + if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) + mb(); + + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; + if (write) { + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->base.write_domain = I915_GEM_DOMAIN_GTT; + obj->dirty = 1; + } + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + + /* And bump the LRU for this access */ + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + return 0; +} + +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (obj->cache_level == cache_level) + return 0; + + if (obj->pin_count) { + DRM_DEBUG("can not change the cache level of pinned objects\n"); + return -EBUSY; + } + + if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + } + + if (obj->gtt_space) { + ret = i915_gem_object_finish_gpu(obj); + if (ret) + return ret; + + i915_gem_object_finish_gtt(obj); + + /* Before SandyBridge, you could not use tiling or fence + * registers with snooped memory, so relinquish any fences + * currently pointing to our region in the aperture. 
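The GTT/CPU domain moves implemented above are what userspace requests through the set-domain ioctl, typically right before touching a mapping. A hypothetical sketch assuming libdrm's drmIoctl() and the i915 uapi header; fd and handle are placeholders.

#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

/* Move 'handle' into the GTT domain for writing before using a GTT mmap. */
static int move_to_gtt_domain(int fd, uint32_t handle)
{
        struct drm_i915_gem_set_domain sd;

        memset(&sd, 0, sizeof(sd));
        sd.handle = handle;
        sd.read_domains = I915_GEM_DOMAIN_GTT;
        sd.write_domain = I915_GEM_DOMAIN_GTT;

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}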
+ */ + if (INTEL_INFO(dev)->gen < 6) { + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + } + + if (obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, cache_level); + if (obj->has_aliasing_ppgtt_mapping) + i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, + obj, cache_level); + + obj->gtt_space->color = cache_level; + } + + if (cache_level == I915_CACHE_NONE) { + u32 old_read_domains, old_write_domain; + + /* If we're coming from LLC cached, then we haven't + * actually been tracking whether the data is in the + * CPU cache or not, since we only allow one bit set + * in obj->write_domain and have been skipping the clflushes. + * Just set it to the CPU cache for now. + */ + WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); + WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); + + old_read_domains = obj->base.read_domains; + old_write_domain = obj->base.write_domain; + + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + } + + obj->cache_level = cache_level; + i915_gem_verify_gtt(dev); + return 0; +} + +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + args->caching = obj->cache_level != I915_CACHE_NONE; + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + enum i915_cache_level level; + int ret; + + switch (args->caching) { + case I915_CACHING_NONE: + level = I915_CACHE_NONE; + break; + case I915_CACHING_CACHED: + level = I915_CACHE_LLC; + break; + default: + return -EINVAL; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + ret = i915_gem_object_set_cache_level(obj, level); + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* + * Prepare buffer for display plane (scanout, cursors, etc). + * Can be called from an uninterruptible phase (modesetting) and allows + * any flushes to be pipelined (for pageflips). + */ +int +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + struct intel_ring_buffer *pipelined) +{ + u32 old_read_domains, old_write_domain; + int ret; + + if (pipelined != obj->ring) { + ret = i915_gem_object_sync(obj, pipelined); + if (ret) + return ret; + } + + /* The display engine is not coherent with the LLC cache on gen6. As + * a result, we make sure that the pinning that is about to occur is + * done with uncached PTEs. This is lowest common denominator for all + * chipsets. + * + * However for gen6+, we could do better by using the GFDT bit instead + * of uncaching, which would allow us to flush all the LLC-cached data + * with that bit in the PTE to main memory with just one PIPE_CONTROL. 
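The cache-level tracking above is exposed to userspace through the get/set caching ioctls handled earlier in this block. A hypothetical usage sketch, again assuming libdrm and placeholder fd/handle values:

#include <stdint.h>
#include <string.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

/* Request LLC-cached (snooped) backing storage for 'handle'. */
static int make_bo_cached(int fd, uint32_t handle)
{
        struct drm_i915_gem_caching arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.caching = I915_CACHING_CACHED;      /* maps to I915_CACHE_LLC */

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}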
+ */ + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); + if (ret) + return ret; + + /* As the user may map the buffer once pinned in the display plane + * (e.g. libkms for the bootup splash), we have to ensure that we + * always use map_and_fenceable for all scanout buffers. + */ + ret = i915_gem_object_pin(obj, alignment, true, false); + if (ret) + return ret; + + i915_gem_object_flush_cpu_write_domain(obj); + + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + obj->base.write_domain = 0; + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + + return 0; +} + +int +i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) +{ + int ret; + + if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) + return 0; + + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + + /* Ensure that we invalidate the GPU's caches and TLBs. */ + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + return 0; +} + +/** + * Moves a single object to the CPU read, and possibly write domain. + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) +{ + uint32_t old_write_domain, old_read_domains; + int ret; + + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return 0; + + ret = i915_gem_object_wait_rendering(obj, !write); + if (ret) + return ret; + + i915_gem_object_flush_gtt_write_domain(obj); + + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; + + /* Flush the CPU cache if it's still invalid. */ + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { + i915_gem_clflush_object(obj); + + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; + } + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + + /* If we're writing through the CPU, then the GPU read domains will + * need to be invalidated at next use. + */ + if (write) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + trace_i915_gem_object_change_domain(obj, + old_read_domains, + old_write_domain); + + return 0; +} + +/* Throttle our rendering by waiting until the ring has completed our requests + * emitted over 20 msec ago. + * + * Note that if we were to use the current jiffies each time around the loop, + * we wouldn't escape the function with any frames outstanding if the time to + * render a frame was over 20ms. + * + * This should get us reasonable parallelism between CPU and GPU but also + * relatively low latency when blocking on a particular request to finish. 
+ */ +static int +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + unsigned long recent_enough = jiffies - msecs_to_jiffies(20); + struct drm_i915_gem_request *request; + struct intel_ring_buffer *ring = NULL; + unsigned reset_counter; + u32 seqno = 0; + int ret; + + ret = i915_gem_wait_for_error(&dev_priv->gpu_error); + if (ret) + return ret; + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); + if (ret) + return ret; + + spin_lock(&file_priv->mm.lock); + list_for_each_entry(request, &file_priv->mm.request_list, client_list) { + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + + ring = request->ring; + seqno = request->seqno; + } + reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); + spin_unlock(&file_priv->mm.lock); + + if (seqno == 0) + return 0; + + ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); + if (ret == 0) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); + + return ret; +} + +int +i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable, + bool nonblocking) +{ + int ret; + + if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; + + if (obj->gtt_space != NULL) { + if ((alignment && obj->gtt_offset & (alignment - 1)) || + (map_and_fenceable && !obj->map_and_fenceable)) { + WARN(obj->pin_count, + "bo is already pinned with incorrect alignment:" + " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," + " obj->map_and_fenceable=%d\n", + obj->gtt_offset, alignment, + map_and_fenceable, + obj->map_and_fenceable); + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + } + } + + if (obj->gtt_space == NULL) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + ret = i915_gem_object_bind_to_gtt(obj, alignment, + map_and_fenceable, + nonblocking); + if (ret) + return ret; + + if (!dev_priv->mm.aliasing_ppgtt) + i915_gem_gtt_bind_object(obj, obj->cache_level); + } + + if (!obj->has_global_gtt_mapping && map_and_fenceable) + i915_gem_gtt_bind_object(obj, obj->cache_level); + + obj->pin_count++; + obj->pin_mappable |= map_and_fenceable; + + return 0; +} + +void +i915_gem_object_unpin(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pin_count == 0); + BUG_ON(obj->gtt_space == NULL); + + if (--obj->pin_count == 0) + obj->pin_mappable = false; +} + +int +i915_gem_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_pin *args = data; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + if (obj->madv != I915_MADV_WILLNEED) { + DRM_ERROR("Attempting to pin a purgeable buffer\n"); + ret = -EINVAL; + goto out; + } + + if (obj->pin_filp != NULL && obj->pin_filp != file) { + DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", + args->handle); + ret = -EINVAL; + goto out; + } + + if (obj->user_pin_count == 0) { + ret = i915_gem_object_pin(obj, args->alignment, true, false); + if (ret) + goto out; + } + + obj->user_pin_count++; + obj->pin_filp = file; + + /* XXX - flush the CPU caches for pinned objects + * as the X server doesn't manage domains yet + */ + i915_gem_object_flush_cpu_write_domain(obj); + args->offset = obj->gtt_offset; +out: + 
drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_pin *args = data; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + if (obj->pin_filp != file) { + DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", + args->handle); + ret = -EINVAL; + goto out; + } + obj->user_pin_count--; + if (obj->user_pin_count == 0) { + obj->pin_filp = NULL; + i915_gem_object_unpin(obj); + } + +out: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_busy *args = data; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + /* Count all active objects as busy, even if they are currently not used + * by the gpu. Users of this interface expect objects to eventually + * become non-busy without any further actions, therefore emit any + * necessary flushes here. + */ + ret = i915_gem_object_flush_active(obj); + + args->busy = obj->active; + if (obj->ring) { + BUILD_BUG_ON(I915_NUM_RINGS > 16); + args->busy |= intel_ring_flag(obj->ring) << 16; + } + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return i915_gem_ring_throttle(dev, file_priv); +} + +int +i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_madvise *args = data; + struct drm_i915_gem_object *obj; + int ret; + + switch (args->madv) { + case I915_MADV_DONTNEED: + case I915_MADV_WILLNEED: + break; + default: + return -EINVAL; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + if (obj->pin_count) { + ret = -EINVAL; + goto out; + } + + if (obj->madv != __I915_MADV_PURGED) + obj->madv = args->madv; + + /* if the object is no longer attached, discard its backing storage */ + if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) + i915_gem_object_truncate(obj); + + args->retained = obj->madv != __I915_MADV_PURGED; + +out: + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops) +{ + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->gtt_list); + INIT_LIST_HEAD(&obj->ring_list); + INIT_LIST_HEAD(&obj->exec_list); + + obj->ops = ops; + + obj->fence_reg = I915_FENCE_REG_NONE; + obj->madv = I915_MADV_WILLNEED; + /* Avoid an unnecessary call to unbind on the first bind. 
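The busy ioctl above packs two things into one word: bit 0 reports whether the object is still active, and the ring that last used it is shifted into the upper half (the BUILD_BUG_ON guards that encoding). A hypothetical userspace decode, assuming libdrm and placeholder fd/handle values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

static void report_busy(int fd, uint32_t handle)
{
        struct drm_i915_gem_busy busy;

        memset(&busy, 0, sizeof(busy));
        busy.handle = handle;

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
                return;

        if (busy.busy & 1)
                printf("busy, last used by ring flag 0x%x\n", busy.busy >> 16);
        else
                printf("idle\n");
}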
*/ + obj->map_and_fenceable = true; + + i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); +} + +static const struct drm_i915_gem_object_ops i915_gem_object_ops = { + .get_pages = i915_gem_object_get_pages_gtt, + .put_pages = i915_gem_object_put_pages_gtt, +}; + +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size) +{ + struct drm_i915_gem_object *obj; + struct address_space *mapping; + gfp_t mask; + + obj = i915_gem_object_alloc(dev); + if (obj == NULL) + return NULL; + + if (drm_gem_object_init(dev, &obj->base, size) != 0) { + i915_gem_object_free(obj); + return NULL; + } + + mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; + if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { + /* 965gm cannot relocate objects above 4GiB. */ + mask &= ~__GFP_HIGHMEM; + mask |= __GFP_DMA32; + } + + mapping = file_inode(obj->base.filp)->i_mapping; + mapping_set_gfp_mask(mapping, mask); + + i915_gem_object_init(obj, &i915_gem_object_ops); + + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + + if (HAS_LLC(dev)) { + /* On some devices, we can have the GPU use the LLC (the CPU + * cache) for about a 10% performance improvement + * compared to uncached. Graphics requests other than + * display scanout are coherent with the CPU in + * accessing this cache. This means in this mode we + * don't need to clflush on the CPU side, and on the + * GPU side we only need to flush internal caches to + * get data visible to the CPU. + * + * However, we maintain the display planes as UC, and so + * need to rebind when first used as such. + */ + obj->cache_level = I915_CACHE_LLC; + } else + obj->cache_level = I915_CACHE_NONE; + + return obj; +} + +int i915_gem_init_object(struct drm_gem_object *obj) +{ + BUG(); + + return 0; +} + +void i915_gem_free_object(struct drm_gem_object *gem_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + trace_i915_gem_object_destroy(obj); + + if (obj->phys_obj) + i915_gem_detach_phys_object(dev, obj); + + obj->pin_count = 0; + if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) { + bool was_interruptible; + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + WARN_ON(i915_gem_object_unbind(obj)); + + dev_priv->mm.interruptible = was_interruptible; + } + + obj->pages_pin_count = 0; + i915_gem_object_put_pages(obj); + i915_gem_object_free_mmap_offset(obj); + i915_gem_object_release_stolen(obj); + + BUG_ON(obj->pages); + + if (obj->base.import_attach) + drm_prime_gem_destroy(&obj->base, NULL); + + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev_priv, obj->base.size); + + kfree(obj->bit_17); + i915_gem_object_free(obj); +} + +int +i915_gem_idle(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + mutex_lock(&dev->struct_mutex); + + if (dev_priv->mm.suspended) { + mutex_unlock(&dev->struct_mutex); + return 0; + } + + ret = i915_gpu_idle(dev); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + i915_gem_retire_requests(dev); + + /* Under UMS, be paranoid and evict. */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_gem_evict_everything(dev); + + /* Hack! Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + * And not confound mm.suspended! 
+ */ + dev_priv->mm.suspended = 1; + del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); + + i915_kernel_lost_context(dev); + i915_gem_cleanup_ringbuffer(dev); + + mutex_unlock(&dev->struct_mutex); + + /* Cancel the retire work handler, which should be idle now. */ + cancel_delayed_work_sync(&dev_priv->mm.retire_work); + + return 0; +} + +void i915_gem_l3_remap(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 misccpctl; + int i; + + if (!HAS_L3_GPU_CACHE(dev)) + return; + + if (!dev_priv->l3_parity.remap_info) + return; + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + POSTING_READ(GEN7_MISCCPCTL); + + for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { + u32 remap = I915_READ(GEN7_L3LOG_BASE + i); + if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) + DRM_DEBUG("0x%x was already programmed to %x\n", + GEN7_L3LOG_BASE + i, remap); + if (remap && !dev_priv->l3_parity.remap_info[i/4]) + DRM_DEBUG_DRIVER("Clearing remapped register\n"); + I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); + } + + /* Make sure all the writes land before disabling dop clock gating */ + POSTING_READ(GEN7_L3LOG_BASE); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); +} + +void i915_gem_init_swizzling(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen < 5 || + dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) + return; + + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_TILE_SURFACE_SWIZZLING); + + if (IS_GEN5(dev)) + return; + + I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); + if (IS_GEN6(dev)) + I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); + else if (IS_GEN7(dev)) + I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); + else + BUG(); +} + +static bool +intel_enable_blt(struct drm_device *dev) +{ + if (!HAS_BLT(dev)) + return false; + + /* The blitter was dysfunctional on early prototypes */ + if (IS_GEN6(dev) && dev->pdev->revision < 8) { + DRM_INFO("BLT not supported on this pre-production hardware;" + " graphics performance will be degraded.\n"); + return false; + } + + return true; +} + +static int i915_gem_init_rings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = intel_init_render_ring_buffer(dev); + if (ret) + return ret; + + if (HAS_BSD(dev)) { + ret = intel_init_bsd_ring_buffer(dev); + if (ret) + goto cleanup_render_ring; + } + + if (intel_enable_blt(dev)) { + ret = intel_init_blt_ring_buffer(dev); + if (ret) + goto cleanup_bsd_ring; + } + + ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); + if (ret) + goto cleanup_blt_ring; + + return 0; + +cleanup_blt_ring: + intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); +cleanup_bsd_ring: + intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); +cleanup_render_ring: + intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); + + return ret; +} + +int +i915_gem_init_hw(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (INTEL_INFO(dev)->gen < 6) + return -EIO; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) + if (!intel_enable_gtt()) + return -EIO; +#endif + + if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) + I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); + + if (HAS_PCH_NOP(dev)) { + u32 temp = I915_READ(GEN7_MSG_CTL); + temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); + I915_WRITE(GEN7_MSG_CTL, temp); + } + + i915_gem_l3_remap(dev); + 
+ i915_gem_init_swizzling(dev); + + ret = i915_gem_init_rings(dev); + if (ret) + return ret; + + /* + * XXX: There was some w/a described somewhere suggesting loading + * contexts before PPGTT. + */ + i915_gem_context_init(dev); + if (dev_priv->mm.aliasing_ppgtt) { + ret = dev_priv->mm.aliasing_ppgtt->enable(dev); + if (ret) { + i915_gem_cleanup_aliasing_ppgtt(dev); + DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n"); + } + } + + return 0; +} + +int i915_gem_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + mutex_lock(&dev->struct_mutex); + + if (IS_VALLEYVIEW(dev)) { + /* VLVA0 (potential hack), BIOS isn't actually waking us */ + I915_WRITE(VLV_GTLC_WAKE_CTRL, 1); + if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10)) + DRM_DEBUG_DRIVER("allow wake ack timed out\n"); + } + + i915_gem_init_global_gtt(dev); + + ret = i915_gem_init_hw(dev); + mutex_unlock(&dev->struct_mutex); + if (ret) { + i915_gem_cleanup_aliasing_ppgtt(dev); + return ret; + } + + /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->dri1.allow_batchbuffer = 1; + return 0; +} + +void +i915_gem_cleanup_ringbuffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; + + for_each_ring(ring, dev_priv, i) + intel_cleanup_ring_buffer(ring); +} + +int +i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + + if (i915_reset_in_progress(&dev_priv->gpu_error)) { + DRM_ERROR("Reenabling wedged hardware, good luck\n"); + atomic_set(&dev_priv->gpu_error.reset_counter, 0); + } + + mutex_lock(&dev->struct_mutex); + dev_priv->mm.suspended = 0; + + ret = i915_gem_init_hw(dev); + if (ret != 0) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + + BUG_ON(!list_empty(&dev_priv->mm.active_list)); + mutex_unlock(&dev->struct_mutex); + + ret = drm_irq_install(dev); + if (ret) + goto cleanup_ringbuffer; + + return 0; + +cleanup_ringbuffer: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + dev_priv->mm.suspended = 1; + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + + drm_irq_uninstall(dev); + return i915_gem_idle(dev); +} + +void +i915_gem_lastclose(struct drm_device *dev) +{ + int ret; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + ret = i915_gem_idle(dev); + if (ret) + DRM_ERROR("failed to idle hardware: %d\n", ret); +} + +static void +init_ring_lists(struct intel_ring_buffer *ring) +{ + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); +} + +void +i915_gem_load(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + dev_priv->slab = + kmem_cache_create("i915_gem_object", + sizeof(struct drm_i915_gem_object), 0, + SLAB_HWCACHE_ALIGN, + NULL); + + INIT_LIST_HEAD(&dev_priv->mm.active_list); + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->mm.unbound_list); + INIT_LIST_HEAD(&dev_priv->mm.bound_list); + INIT_LIST_HEAD(&dev_priv->mm.fence_list); + for (i = 0; i < I915_NUM_RINGS; i++) + init_ring_lists(&dev_priv->ring[i]); + for (i = 0; i < I915_MAX_NUM_FENCES; 
i++) + INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, + i915_gem_retire_work_handler); + init_waitqueue_head(&dev_priv->gpu_error.reset_queue); + + /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ + if (IS_GEN3(dev)) { + I915_WRITE(MI_ARB_STATE, + _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); + } + + dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; + + /* Old X drivers will take 0-2 for front, back, depth buffers */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->fence_reg_start = 3; + + if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) + dev_priv->num_fence_regs = 32; + else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dev_priv->num_fence_regs = 16; + else + dev_priv->num_fence_regs = 8; + + /* Initialize fence registers to zero */ + INIT_LIST_HEAD(&dev_priv->mm.fence_list); + i915_gem_restore_fences(dev); + + i915_gem_detect_bit_6_swizzle(dev); + init_waitqueue_head(&dev_priv->pending_flip_queue); + + dev_priv->mm.interruptible = true; + + dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; + dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&dev_priv->mm.inactive_shrinker); +} + +/* + * Create a physically contiguous memory object for this object + * e.g. for cursor + overlay regs + */ +static int i915_gem_init_phys_object(struct drm_device *dev, + int id, int size, int align) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_phys_object *phys_obj; + int ret; + + if (dev_priv->mm.phys_objs[id - 1] || !size) + return 0; + + phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); + if (!phys_obj) + return -ENOMEM; + + phys_obj->id = id; + + phys_obj->handle = drm_pci_alloc(dev, size, align); + if (!phys_obj->handle) { + ret = -ENOMEM; + goto kfree_obj; + } +#ifdef CONFIG_X86 + set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); +#endif + + dev_priv->mm.phys_objs[id - 1] = phys_obj; + + return 0; +kfree_obj: + kfree(phys_obj); + return ret; +} + +static void i915_gem_free_phys_object(struct drm_device *dev, int id) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_phys_object *phys_obj; + + if (!dev_priv->mm.phys_objs[id - 1]) + return; + + phys_obj = dev_priv->mm.phys_objs[id - 1]; + if (phys_obj->cur_obj) { + i915_gem_detach_phys_object(dev, phys_obj->cur_obj); + } + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); +#endif + drm_pci_free(dev, phys_obj->handle); + kfree(phys_obj); + dev_priv->mm.phys_objs[id - 1] = NULL; +} + +void i915_gem_free_all_phys_object(struct drm_device *dev) +{ + int i; + + for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) + i915_gem_free_phys_object(dev, i); +} + +void i915_gem_detach_phys_object(struct drm_device *dev, + struct drm_i915_gem_object *obj) +{ + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + char *vaddr; + int i; + int page_count; + + if (!obj->phys_obj) + return; + vaddr = obj->phys_obj->handle->vaddr; + + page_count = obj->base.size / PAGE_SIZE; + for (i = 0; i < page_count; i++) { + struct page *page = shmem_read_mapping_page(mapping, i); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); + kunmap_atomic(dst); + + drm_clflush_pages(&page, 1); + + set_page_dirty(page); + mark_page_accessed(page); + 
page_cache_release(page); + } + } + i915_gem_chipset_flush(dev); + + obj->phys_obj->cur_obj = NULL; + obj->phys_obj = NULL; +} + +int +i915_gem_attach_phys_object(struct drm_device *dev, + struct drm_i915_gem_object *obj, + int id, + int align) +{ + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret = 0; + int page_count; + int i; + + if (id > I915_MAX_PHYS_OBJECT) + return -EINVAL; + + if (obj->phys_obj) { + if (obj->phys_obj->id == id) + return 0; + i915_gem_detach_phys_object(dev, obj); + } + + /* create a new object */ + if (!dev_priv->mm.phys_objs[id - 1]) { + ret = i915_gem_init_phys_object(dev, id, + obj->base.size, align); + if (ret) { + DRM_ERROR("failed to init phys object %d size: %zu\n", + id, obj->base.size); + return ret; + } + } + + /* bind to the object */ + obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; + obj->phys_obj->cur_obj = obj; + + page_count = obj->base.size / PAGE_SIZE; + + for (i = 0; i < page_count; i++) { + struct page *page; + char *dst, *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) + return PTR_ERR(page); + + src = kmap_atomic(page); + dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); + memcpy(dst, src, PAGE_SIZE); + kunmap_atomic(src); + + mark_page_accessed(page); + page_cache_release(page); + } + + return 0; +} + +static int +i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + void *vaddr = obj->phys_obj->handle->vaddr + args->offset; + char __user *user_data = to_user_ptr(args->data_ptr); + + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; + + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. + */ + mutex_unlock(&dev->struct_mutex); + unwritten = copy_from_user(vaddr, user_data, args->size); + mutex_lock(&dev->struct_mutex); + if (unwritten) + return -EFAULT; + } + + i915_gem_chipset_flush(dev); + return 0; +} + +void i915_gem_release(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + /* Clean up our request list when the client is going away, so that + * later retire_requests won't dereference our soon-to-be-gone + * file_priv. 
+ */ + spin_lock(&file_priv->mm.lock); + while (!list_empty(&file_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); + list_del(&request->client_list); + request->file_priv = NULL; + } + spin_unlock(&file_priv->mm.lock); +} + +static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +{ + if (!mutex_is_locked(mutex)) + return false; + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) + return mutex->owner == task; +#else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ + return false; +#endif +} + +static int +i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, + struct drm_i915_private, + mm.inactive_shrinker); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj; + int nr_to_scan = sc->nr_to_scan; + bool unlock = true; + int cnt; + + if (!mutex_trylock(&dev->struct_mutex)) { + if (!mutex_is_locked_by(&dev->struct_mutex, current)) + return 0; + + if (dev_priv->mm.shrinker_no_lock_stealing) + return 0; + + unlock = false; + } + + if (nr_to_scan) { + nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); + if (nr_to_scan > 0) + nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan, + false); + if (nr_to_scan > 0) + i915_gem_shrink_all(dev_priv); + } + + cnt = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) + if (obj->pages_pin_count == 0) + cnt += obj->base.size >> PAGE_SHIFT; + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) + if (obj->pin_count == 0 && obj->pages_pin_count == 0) + cnt += obj->base.size >> PAGE_SHIFT; + + if (unlock) + mutex_unlock(&dev->struct_mutex); + return cnt; +} diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c new file mode 100644 index 0000000..3bc8a58 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -0,0 +1,521 @@ +/* + * Copyright © 2011-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky + * + */ + +/* + * This file implements HW context support. On gen5+ a HW context consists of an + * opaque GPU object which is referenced at times of context saves and restores. 
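A note on the shrinker just above (the tail end of i915_gem.c): called with sc->nr_to_scan == 0 it only reports how many pages could be reclaimed; otherwise it frees in escalating steps, first i915_gem_purge(), then __i915_gem_shrink(..., false) for bound-but-idle objects, and finally i915_gem_shrink_all(). The count it returns is the page total of unbound objects with no pinned pages plus unpinned, inactive objects; for example, assuming 4 KiB pages, two such 64 KiB unbound objects and one idle 1 MiB object would be reported as 2 * 16 + 256 = 288 reclaimable pages.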
+ * With RC6 enabled, the context is also referenced as the GPU enters and exists + * from RC6 (GPU has it's own internal power context, except on gen5). Though + * something like a context does exist for the media ring, the code only + * supports contexts for the render ring. + * + * In software, there is a distinction between contexts created by the user, + * and the default HW context. The default HW context is used by GPU clients + * that do not request setup of their own hardware context. The default + * context's state is never restored to help prevent programming errors. This + * would happen if a client ran and piggy-backed off another clients GPU state. + * The default context only exists to give the GPU some offset to load as the + * current to invoke a save of the context we actually care about. In fact, the + * code could likely be constructed, albeit in a more complicated fashion, to + * never use the default context, though that limits the driver's ability to + * swap out, and/or destroy other contexts. + * + * All other contexts are created as a request by the GPU client. These contexts + * store GPU state, and thus allow GPU clients to not re-emit state (and + * potentially query certain state) at any time. The kernel driver makes + * certain that the appropriate commands are inserted. + * + * The context life cycle is semi-complicated in that context BOs may live + * longer than the context itself because of the way the hardware, and object + * tracking works. Below is a very crude representation of the state machine + * describing the context life. + * refcount pincount active + * S0: initial state 0 0 0 + * S1: context created 1 0 0 + * S2: context is currently running 2 1 X + * S3: GPU referenced, but not current 2 0 1 + * S4: context is current, but destroyed 1 1 0 + * S5: like S3, but destroyed 1 0 1 + * + * The most common (but not all) transitions: + * S0->S1: client creates a context + * S1->S2: client submits execbuf with context + * S2->S3: other clients submits execbuf with context + * S3->S1: context object was retired + * S3->S2: clients submits another execbuf + * S2->S4: context destroy called with current context + * S3->S5->S0: destroy path + * S4->S5->S0: destroy path on current context + * + * There are two confusing terms used above: + * The "current context" means the context which is currently running on the + * GPU. The GPU has loaded it's state already and has stored away the gtt + * offset of the BO. The GPU is not actively referencing the data at this + * offset, but it will on the next context switch. The only way to avoid this + * is to do a GPU reset. + * + * An "active context' is one which was previously the "current context" and is + * on the active list waiting for the next context switch to occur. Until this + * happens, the object must remain at the same gtt offset. It is therefore + * possible to destroy a context, but it is still active. + * + */ + +#include +#include +#include "i915_drv.h" + +/* This is a HW constraint. The value below is the largest known requirement + * I've seen in a spec to date, and that was a workaround for a non-shipping + * part. It should be safe to decrease this, but it's more future proof as is. 
+ */ +#define CONTEXT_ALIGN (64<<10) + +static struct i915_hw_context * +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); +static int do_switch(struct i915_hw_context *to); + +static int get_context_size(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + u32 reg; + + switch (INTEL_INFO(dev)->gen) { + case 6: + reg = I915_READ(CXT_SIZE); + ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; + break; + case 7: + reg = I915_READ(GEN7_CXT_SIZE); + if (IS_HASWELL(dev)) + ret = HSW_CXT_TOTAL_SIZE; + else + ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; + break; + default: + BUG(); + } + + return ret; +} + +static void do_destroy(struct i915_hw_context *ctx) +{ + if (ctx->file_priv) + idr_remove(&ctx->file_priv->context_idr, ctx->id); + + drm_gem_object_unreference(&ctx->obj->base); + kfree(ctx); +} + +static struct i915_hw_context * +create_hw_context(struct drm_device *dev, + struct drm_i915_file_private *file_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_context *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) + return ERR_PTR(-ENOMEM); + + ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); + if (ctx->obj == NULL) { + kfree(ctx); + DRM_DEBUG_DRIVER("Context object allocated failed\n"); + return ERR_PTR(-ENOMEM); + } + + if (INTEL_INFO(dev)->gen >= 7) { + ret = i915_gem_object_set_cache_level(ctx->obj, + I915_CACHE_LLC_MLC); + if (ret) + goto err_out; + } + + /* The ring associated with the context object is handled by the normal + * object tracking code. We give an initial ring value simple to pass an + * assertion in the context switch code. + */ + ctx->ring = &dev_priv->ring[RCS]; + + /* Default context will never have a file_priv */ + if (file_priv == NULL) + return ctx; + + ctx->file_priv = file_priv; + + ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, + GFP_KERNEL); + if (ret < 0) + goto err_out; + ctx->id = ret; + + return ctx; + +err_out: + do_destroy(ctx); + return ERR_PTR(ret); +} + +static inline bool is_default_context(struct i915_hw_context *ctx) +{ + return (ctx == ctx->ring->default_context); +} + +/** + * The default context needs to exist per ring that uses contexts. It stores the + * context state of the GPU for applications that don't utilize HW contexts, as + * well as an idle case. + */ +static int create_default_context(struct drm_i915_private *dev_priv) +{ + struct i915_hw_context *ctx; + int ret; + + BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + + ctx = create_hw_context(dev_priv->dev, NULL); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + /* We may need to do things with the shrinker which require us to + * immediately switch back to the default context. This can cause a + * problem as pinning the default context also requires GTT space which + * may not be available. To avoid this we always pin the + * default context. 
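create_hw_context() above hands out the user-visible context ids from a per-file idr, starting just past DEFAULT_CONTEXT_ID so that id 0 can always mean "use the default context". A condensed sketch of that id life cycle as the surrounding code uses it (idr_alloc() returns the new id or a negative errno; an end argument of 0 means no upper bound):

    ret = idr_alloc(&file_priv->context_idr, ctx,
                    DEFAULT_CONTEXT_ID + 1, 0, GFP_KERNEL);
    if (ret < 0)
            return ERR_PTR(ret);        /* -ENOMEM, -ENOSPC, ... */
    ctx->id = ret;

    /* lookup, as i915_gem_context_get() does */
    ctx = idr_find(&file_priv->context_idr, id);

    /* teardown, as do_destroy() does */
    idr_remove(&file_priv->context_idr, ctx->id);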
+ */ + dev_priv->ring[RCS].default_context = ctx; + ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); + if (ret) + goto err_destroy; + + ret = do_switch(ctx); + if (ret) + goto err_unpin; + + DRM_DEBUG_DRIVER("Default HW context loaded\n"); + return 0; + +err_unpin: + i915_gem_object_unpin(ctx->obj); +err_destroy: + do_destroy(ctx); + return ret; +} + +void i915_gem_context_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_HW_CONTEXTS(dev)) { + dev_priv->hw_contexts_disabled = true; + return; + } + + /* If called from reset, or thaw... we've been here already */ + if (dev_priv->hw_contexts_disabled || + dev_priv->ring[RCS].default_context) + return; + + dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); + + if (dev_priv->hw_context_size > (1<<20)) { + dev_priv->hw_contexts_disabled = true; + return; + } + + if (create_default_context(dev_priv)) { + dev_priv->hw_contexts_disabled = true; + return; + } + + DRM_DEBUG_DRIVER("HW context support initialized\n"); +} + +void i915_gem_context_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->hw_contexts_disabled) + return; + + /* The only known way to stop the gpu from accessing the hw context is + * to reset it. Do this as the very last operation to avoid confusing + * other code, leading to spurious errors. */ + intel_gpu_reset(dev); + + i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); + + do_destroy(dev_priv->ring[RCS].default_context); +} + +static int context_idr_cleanup(int id, void *p, void *data) +{ + struct i915_hw_context *ctx = p; + + BUG_ON(id == DEFAULT_CONTEXT_ID); + + do_destroy(ctx); + + return 0; +} + +void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + mutex_lock(&dev->struct_mutex); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); + idr_destroy(&file_priv->context_idr); + mutex_unlock(&dev->struct_mutex); +} + +static struct i915_hw_context * +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) +{ + return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); +} + +static inline int +mi_set_context(struct intel_ring_buffer *ring, + struct i915_hw_context *new_context, + u32 hw_flags) +{ + int ret; + + /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB + * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value + * explicitly, so we rely on the value at ring init, stored in + * itlb_before_ctx_switch. 
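Worth noting about i915_gem_context_init() above: it never fails hard. Missing hardware support, a context image that is implausibly large (more than 1 MiB after rounding up to 4 KiB), or a failure to create and pin the default context all simply set hw_contexts_disabled, and the driver keeps running without contexts. As a worked example of the sizing on gen6 with an arbitrary register value, a CXT_SIZE total of 37 units of 64 bytes would give hw_context_size = round_up(37 * 64, 4096) = round_up(2368, 4096) = 4096 bytes.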
+ */ + if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); + if (ret) + return ret; + } + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + if (IS_GEN7(ring->dev)) + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); + else + intel_ring_emit(ring, MI_NOOP); + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, new_context->obj->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + hw_flags); + /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ + intel_ring_emit(ring, MI_NOOP); + + if (IS_GEN7(ring->dev)) + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); + else + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + return ret; +} + +static int do_switch(struct i915_hw_context *to) +{ + struct intel_ring_buffer *ring = to->ring; + struct drm_i915_gem_object *from_obj = ring->last_context_obj; + u32 hw_flags = 0; + int ret; + + BUG_ON(from_obj != NULL && from_obj->pin_count == 0); + + if (from_obj == to->obj) + return 0; + + ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); + if (ret) + return ret; + + /* Clear this page out of any CPU caches for coherent swap-in/out. Note + * that thanks to write = false in this call and us not setting any gpu + * write domains when putting a context object onto the active list + * (when switching away from it), this won't block. + * XXX: We need a real interface to do this instead of trickery. */ + ret = i915_gem_object_set_to_gtt_domain(to->obj, false); + if (ret) { + i915_gem_object_unpin(to->obj); + return ret; + } + + if (!to->obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); + + if (!to->is_initialized || is_default_context(to)) + hw_flags |= MI_RESTORE_INHIBIT; + else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ + hw_flags |= MI_FORCE_RESTORE; + + ret = mi_set_context(ring, to, hw_flags); + if (ret) { + i915_gem_object_unpin(to->obj); + return ret; + } + + /* The backing object for the context is done after switching to the + * *next* context. Therefore we cannot retire the previous context until + * the next context has already started running. In fact, the below code + * is a bit suboptimal because the retiring can occur simply after the + * MI_SET_CONTEXT instead of when the next seqno has completed. + */ + if (from_obj != NULL) { + from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_gem_object_move_to_active(from_obj, ring); + /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the + * whole damn pipeline, we don't need to explicitly mark the + * object dirty. The only exception is that the context must be + * correct in case the object gets swapped out. Ideally we'd be + * able to defer doing this until we know the object would be + * swapped, but there is no way to do that yet. + */ + from_obj->dirty = 1; + BUG_ON(from_obj->ring != ring); + i915_gem_object_unpin(from_obj); + + drm_gem_object_unreference(&from_obj->base); + } + + drm_gem_object_reference(&to->obj->base); + ring->last_context_obj = to->obj; + to->is_initialized = true; + + return 0; +} + +/** + * i915_switch_context() - perform a GPU context switch. 
+ * @ring: ring for which we'll execute the context switch + * @file_priv: file_priv associated with the context, may be NULL + * @id: context id number + * @seqno: sequence number by which the new context will be switched to + * @flags: + * + * The context life cycle is simple. The context refcount is incremented and + * decremented by 1 and create and destroy. If the context is in use by the GPU, + * it will have a refoucnt > 1. This allows us to destroy the context abstract + * object while letting the normal object tracking destroy the backing BO. + */ +int i915_switch_context(struct intel_ring_buffer *ring, + struct drm_file *file, + int to_id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct i915_hw_context *to; + + if (dev_priv->hw_contexts_disabled) + return 0; + + if (ring != &dev_priv->ring[RCS]) + return 0; + + if (to_id == DEFAULT_CONTEXT_ID) { + to = ring->default_context; + } else { + if (file == NULL) + return -EINVAL; + + to = i915_gem_context_get(file->driver_priv, to_id); + if (to == NULL) + return -ENOENT; + } + + return do_switch(to); +} + +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_context_create *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_context *ctx; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + if (dev_priv->hw_contexts_disabled) + return -ENODEV; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = create_hw_context(dev, file_priv); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + args->ctx_id = ctx->id; + DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); + + return 0; +} + +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_context_destroy *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_context *ctx; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = i915_gem_context_get(file_priv, args->ctx_id); + if (!ctx) { + mutex_unlock(&dev->struct_mutex); + return -ENOENT; + } + + do_destroy(ctx); + + mutex_unlock(&dev->struct_mutex); + + DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c new file mode 100644 index 0000000..582e6a5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -0,0 +1,187 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
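The create and destroy ioctls above are the whole userspace-visible surface of this file: create returns a ctx_id, destroy takes one, and execbuffer2 later carries the id in its rsvd1 field (see i915_execbuffer2_get_context_id() used in i915_gem_execbuffer.c below). A rough sketch of a client driving them through libdrm; drmIoctl() comes from xf86drm.h, the structures and ioctl numbers from i915_drm.h, and fd is assumed to be an already-open DRM device:

    struct drm_i915_gem_context_create create = { 0 };
    struct drm_i915_gem_context_destroy destroy = { 0 };

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) != 0)
            return -errno;
    /* create.ctx_id now names a fresh hardware context for this fd */

    destroy.ctx_id = create.ctx_id;
    drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);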
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Keith Packard + * + */ + +#include +#include +#include "i915_drv.h" + +#if WATCH_LISTS +int +i915_verify_lists(struct drm_device *dev) +{ + static int warned; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + int err = 0; + + if (warned) + return 0; + + list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed render active %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid render active %p (a %d r %x)\n", + obj, + obj->active, + obj->base.read_domains); + err++; + } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) { + DRM_ERROR("invalid render active %p (w %x, gwl %d)\n", + obj, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed flushing %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || + list_empty(&obj->gpu_write_list)) { + DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", + obj, + obj->active, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed gpu write %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid gpu write %p (a %d w %x)\n", + obj, + obj->active, + obj->base.write_domain); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed inactive %p\n", obj); + err++; + break; + } else if (obj->pin_count || obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { + DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n", + obj, + obj->pin_count, obj->active, + obj->base.write_domain); + err++; + } + } + + return warned = err; +} +#endif /* WATCH_INACTIVE */ + +#if WATCH_COHERENCY +void +i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) +{ + struct drm_device *dev = obj->base.dev; + int page; + uint32_t *gtt_mapping; + uint32_t *backing_map = NULL; + int bad_count = 0; + + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", + __func__, obj, obj->gtt_offset, handle, + obj->size / 1024); + + gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, + obj->base.size); + if (gtt_mapping == NULL) { + DRM_ERROR("failed to map GTT space\n"); + return; + } + + for (page = 0; page < obj->size / PAGE_SIZE; page++) { + int i; + + backing_map = kmap_atomic(obj->pages[page]); + + if (backing_map == NULL) { + 
DRM_ERROR("failed to map backing page\n"); + goto out; + } + + for (i = 0; i < PAGE_SIZE / 4; i++) { + uint32_t cpuval = backing_map[i]; + uint32_t gttval = readl(gtt_mapping + + page * 1024 + i); + + if (cpuval != gttval) { + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " + "0x%08x vs 0x%08x\n", + (int)(obj->gtt_offset + + page * PAGE_SIZE + i * 4), + cpuval, gttval); + if (bad_count++ >= 8) { + DRM_INFO("...\n"); + goto out; + } + } + } + kunmap_atomic(backing_map); + backing_map = NULL; + } + + out: + if (backing_map != NULL) + kunmap_atomic(backing_map); + iounmap(gtt_mapping); + + /* give syslog time to catch up */ + msleep(1); + + /* Directly flush the object, since we just loaded values with the CPU + * from the backing pages and we don't want to disturb the cache + * management that we're trying to observe. + */ + + i915_gem_clflush_object(obj); +} +#endif diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c new file mode 100644 index 0000000..dc53a52 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -0,0 +1,308 @@ +/* + * Copyright 2012 Red Hat Inc + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Dave Airlie + */ +#include +#include "i915_drv.h" +#include + +static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + struct sg_table *st; + struct scatterlist *src, *dst; + int ret, i; + + ret = i915_mutex_lock_interruptible(obj->base.dev); + if (ret) + return ERR_PTR(ret); + + ret = i915_gem_object_get_pages(obj); + if (ret) { + st = ERR_PTR(ret); + goto out; + } + + /* Copy sg so that we make an independent mapping */ + st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (st == NULL) { + st = ERR_PTR(-ENOMEM); + goto out; + } + + ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); + if (ret) { + kfree(st); + st = ERR_PTR(ret); + goto out; + } + + src = obj->pages->sgl; + dst = st->sgl; + for (i = 0; i < obj->pages->nents; i++) { + sg_set_page(dst, sg_page(src), src->length, 0); + dst = sg_next(dst); + src = sg_next(src); + } + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { + sg_free_table(st); + kfree(st); + st = ERR_PTR(-ENOMEM); + goto out; + } + + i915_gem_object_pin_pages(obj); + +out: + mutex_unlock(&obj->base.dev->struct_mutex); + return st; +} + +static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *sg, + enum dma_data_direction dir) +{ + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + sg_free_table(sg); + kfree(sg); +} + +static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) +{ + struct drm_i915_gem_object *obj = dma_buf->priv; + + if (obj->base.export_dma_buf == dma_buf) { + /* drop the reference on the export fd holds */ + obj->base.export_dma_buf = NULL; + drm_gem_object_unreference_unlocked(&obj->base); + } +} + +static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) +{ + struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->base.dev; + struct sg_page_iter sg_iter; + struct page **pages; + int ret, i; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ERR_PTR(ret); + + if (obj->dma_buf_vmapping) { + obj->vmapping_count++; + goto out_unlock; + } + + ret = i915_gem_object_get_pages(obj); + if (ret) + goto error; + + ret = -ENOMEM; + + pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); + if (pages == NULL) + goto error; + + i = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) + pages[i++] = sg_page_iter_page(&sg_iter); + + obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); + drm_free_large(pages); + + if (!obj->dma_buf_vmapping) + goto error; + + obj->vmapping_count = 1; + i915_gem_object_pin_pages(obj); +out_unlock: + mutex_unlock(&dev->struct_mutex); + return obj->dma_buf_vmapping; + +error: + mutex_unlock(&dev->struct_mutex); + return ERR_PTR(ret); +} + +static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ + struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->base.dev; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return; + + if (--obj->vmapping_count == 0) { + vunmap(obj->dma_buf_vmapping); + obj->dma_buf_vmapping = NULL; + + i915_gem_object_unpin_pages(obj); + } + mutex_unlock(&dev->struct_mutex); +} + +static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) +{ + return NULL; +} + +static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr) +{ + +} +static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, 
unsigned long page_num) +{ + return NULL; +} + +static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) +{ + +} + +static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + return -EINVAL; +} + +static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) +{ + struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->base.dev; + int ret; + bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = i915_gem_object_set_to_cpu_domain(obj, write); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +static const struct dma_buf_ops i915_dmabuf_ops = { + .map_dma_buf = i915_gem_map_dma_buf, + .unmap_dma_buf = i915_gem_unmap_dma_buf, + .release = i915_gem_dmabuf_release, + .kmap = i915_gem_dmabuf_kmap, + .kmap_atomic = i915_gem_dmabuf_kmap_atomic, + .kunmap = i915_gem_dmabuf_kunmap, + .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic, + .mmap = i915_gem_dmabuf_mmap, + .vmap = i915_gem_dmabuf_vmap, + .vunmap = i915_gem_dmabuf_vunmap, + .begin_cpu_access = i915_gem_begin_cpu_access, +}; + +struct dma_buf *i915_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gem_obj, int flags) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags); +} + +static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) +{ + struct sg_table *sg; + + sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sg)) + return PTR_ERR(sg); + + obj->pages = sg; + obj->has_dma_mapping = true; + return 0; +} + +static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) +{ + dma_buf_unmap_attachment(obj->base.import_attach, + obj->pages, DMA_BIDIRECTIONAL); + obj->has_dma_mapping = false; +} + +static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { + .get_pages = i915_gem_object_get_pages_dmabuf, + .put_pages = i915_gem_object_put_pages_dmabuf, +}; + +struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attach; + struct drm_i915_gem_object *obj; + int ret; + + /* is this one of own objects? */ + if (dma_buf->ops == &i915_dmabuf_ops) { + obj = dma_buf->priv; + /* is it from our device? */ + if (obj->base.dev == dev) { + /* + * Importing dmabuf exported from out own gem increases + * refcount on gem itself instead of f_count of dmabuf. 
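i915_gem_prime_import() above short-circuits self-imports: a dma-buf whose ops table is i915's own and whose object already belongs to this device simply gains a GEM reference. Only foreign buffers go through the attach path, roughly the following sequence of the calls used above (error handling elided); the backing pages then come from the exporter through the object's get_pages hook:

    attach = dma_buf_attach(dma_buf, dev->dev);   /* bind this device as importer */
    get_dma_buf(dma_buf);                         /* reference held for the object's life */
    obj->base.import_attach = attach;

    /* later, from i915_gem_object_get_pages_dmabuf(): */
    obj->pages = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);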
+ */ + drm_gem_object_reference(&obj->base); + return &obj->base; + } + } + + /* need to attach */ + attach = dma_buf_attach(dma_buf, dev->dev); + if (IS_ERR(attach)) + return ERR_CAST(attach); + + get_dma_buf(dma_buf); + + obj = i915_gem_object_alloc(dev); + if (obj == NULL) { + ret = -ENOMEM; + goto fail_detach; + } + + ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); + if (ret) { + i915_gem_object_free(obj); + goto fail_detach; + } + + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); + obj->base.import_attach = attach; + + return &obj->base; + +fail_detach: + dma_buf_detach(dma_buf, attach); + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c new file mode 100644 index 0000000..c86d5d9 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -0,0 +1,185 @@ +/* + * Copyright © 2008-2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Chris Wilson + * + */ + +#include +#include "i915_drv.h" +#include +#include "i915_trace.h" + +static bool +mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) +{ + if (obj->pin_count) + return false; + + list_add(&obj->exec_list, unwind); + return drm_mm_scan_add_block(obj->gtt_space); +} + +int +i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, unsigned cache_level, + bool mappable, bool nonblocking) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct list_head eviction_list, unwind_list; + struct drm_i915_gem_object *obj; + int ret = 0; + + trace_i915_gem_evict(dev, min_size, alignment, mappable); + + /* + * The goal is to evict objects and amalgamate space in LRU order. + * The oldest idle objects reside on the inactive list, which is in + * retirement order. The next objects to retire are those on the (per + * ring) active list that do not have an outstanding flush. Once the + * hardware reports completion (the seqno is updated after the + * batchbuffer has been finished) the clean buffer objects would + * be retired to the inactive list. Any dirty objects would be added + * to the tail of the flushing list. So after processing the clean + * active objects we need to emit a MI_FLUSH to retire the flushing + * list, hence the retirement order of the flushing list is in + * advance of the dirty objects on the active lists. 
+ * + * The retirement sequence is thus: + * 1. Inactive objects (already retired) + * 2. Clean active objects + * 3. Flushing list + * 4. Dirty active objects. + * + * On each list, the oldest objects lie at the HEAD with the freshest + * object on the TAIL. + */ + + INIT_LIST_HEAD(&unwind_list); + if (mappable) + drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, + min_size, alignment, cache_level, + 0, dev_priv->gtt.mappable_end); + else + drm_mm_init_scan(&dev_priv->mm.gtt_space, + min_size, alignment, cache_level); + + /* First see if there is a large enough contiguous idle region... */ + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { + if (mark_free(obj, &unwind_list)) + goto found; + } + + if (nonblocking) + goto none; + + /* Now merge in the soon-to-be-expired objects... */ + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (mark_free(obj, &unwind_list)) + goto found; + } + +none: + /* Nothing found, clean up and bail out! */ + while (!list_empty(&unwind_list)) { + obj = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + exec_list); + + ret = drm_mm_scan_remove_block(obj->gtt_space); + BUG_ON(ret); + + list_del_init(&obj->exec_list); + } + + /* We expect the caller to unpin, evict all and try again, or give up. + * So calling i915_gem_evict_everything() is unnecessary. + */ + return -ENOSPC; + +found: + /* drm_mm doesn't allow any other other operations while + * scanning, therefore store to be evicted objects on a + * temporary list. */ + INIT_LIST_HEAD(&eviction_list); + while (!list_empty(&unwind_list)) { + obj = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + exec_list); + if (drm_mm_scan_remove_block(obj->gtt_space)) { + list_move(&obj->exec_list, &eviction_list); + drm_gem_object_reference(&obj->base); + continue; + } + list_del_init(&obj->exec_list); + } + + /* Unbinding will emit any required flushes */ + while (!list_empty(&eviction_list)) { + obj = list_first_entry(&eviction_list, + struct drm_i915_gem_object, + exec_list); + if (ret == 0) + ret = i915_gem_object_unbind(obj); + + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + + return ret; +} + +int +i915_gem_evict_everything(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj, *next; + bool lists_empty; + int ret; + + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.active_list)); + if (lists_empty) + return -ENOSPC; + + trace_i915_gem_evict_everything(dev); + + /* The gpu_idle will flush everything in the write domain to the + * active list. Then we must move everything off the active list + * with retire requests. 
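i915_gem_evict_something() above follows drm_mm's scan contract: every candidate fed to drm_mm_scan_add_block() must later be handed back to drm_mm_scan_remove_block(), which is why even the -ENOSPC path walks the whole unwind list before returning. A compressed view of that protocol using the names from the code above (the real function also retries with the active list and takes references on the objects it will unbind):

    drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, cache_level);

    list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
            list_add(&obj->exec_list, &unwind_list);
            if (drm_mm_scan_add_block(obj->gtt_space))
                    break;                  /* a large enough hole has been found */
    }

    /* Mandatory unwind: remove every scanned block again; the blocks for
     * which drm_mm_scan_remove_block() returns true make up the hole and
     * are the ones to evict. */
    while (!list_empty(&unwind_list)) {
            obj = list_first_entry(&unwind_list,
                                   struct drm_i915_gem_object, exec_list);
            if (drm_mm_scan_remove_block(obj->gtt_space))
                    list_move(&obj->exec_list, &eviction_list);
            else
                    list_del_init(&obj->exec_list);
    }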
+ */ + ret = i915_gpu_idle(dev); + if (ret) + return ret; + + i915_gem_retire_requests(dev); + + /* Having flushed everything, unbind() should never raise an error */ + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, mm_list) + if (obj->pin_count == 0) + WARN_ON(i915_gem_object_unbind(obj)); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c new file mode 100644 index 0000000..117ce38 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -0,0 +1,1224 @@ +/* + * Copyright © 2008,2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Chris Wilson + * + */ + +#include +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include + +struct eb_objects { + struct list_head objects; + int and; + union { + struct drm_i915_gem_object *lut[0]; + struct hlist_head buckets[0]; + }; +}; + +static struct eb_objects * +eb_create(struct drm_i915_gem_execbuffer2 *args) +{ + struct eb_objects *eb = NULL; + + if (args->flags & I915_EXEC_HANDLE_LUT) { + int size = args->buffer_count; + size *= sizeof(struct drm_i915_gem_object *); + size += sizeof(struct eb_objects); + eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + } + + if (eb == NULL) { + int size = args->buffer_count; + int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; + BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); + while (count > 2*size) + count >>= 1; + eb = kzalloc(count*sizeof(struct hlist_head) + + sizeof(struct eb_objects), + GFP_TEMPORARY); + if (eb == NULL) + return eb; + + eb->and = count - 1; + } else + eb->and = -args->buffer_count; + + INIT_LIST_HEAD(&eb->objects); + return eb; +} + +static void +eb_reset(struct eb_objects *eb) +{ + if (eb->and >= 0) + memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); +} + +static int +eb_lookup_objects(struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *exec, + const struct drm_i915_gem_execbuffer2 *args, + struct drm_file *file) +{ + int i; + + spin_lock(&file->table_lock); + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); + if (obj == NULL) { + spin_unlock(&file->table_lock); + DRM_DEBUG("Invalid object handle %d at index %d\n", + exec[i].handle, i); + return -ENOENT; + } + + if 
(!list_empty(&obj->exec_list)) { + spin_unlock(&file->table_lock); + DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", + obj, exec[i].handle, i); + return -EINVAL; + } + + drm_gem_object_reference(&obj->base); + list_add_tail(&obj->exec_list, &eb->objects); + + obj->exec_entry = &exec[i]; + if (eb->and < 0) { + eb->lut[i] = obj; + } else { + uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; + obj->exec_handle = handle; + hlist_add_head(&obj->exec_node, + &eb->buckets[handle & eb->and]); + } + } + spin_unlock(&file->table_lock); + + return 0; +} + +static struct drm_i915_gem_object * +eb_get_object(struct eb_objects *eb, unsigned long handle) +{ + if (eb->and < 0) { + if (handle >= -eb->and) + return NULL; + return eb->lut[handle]; + } else { + struct hlist_head *head; + struct hlist_node *node; + + head = &eb->buckets[handle & eb->and]; + hlist_for_each(node, head) { + struct drm_i915_gem_object *obj; + + obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); + if (obj->exec_handle == handle) + return obj; + } + return NULL; + } +} + +static void +eb_destroy(struct eb_objects *eb) +{ + while (!list_empty(&eb->objects)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&eb->objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + kfree(eb); +} + +static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) +{ + return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || + !obj->map_and_fenceable || + obj->cache_level != I915_CACHE_NONE); +} + +static int +i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, + struct eb_objects *eb, + struct drm_i915_gem_relocation_entry *reloc) +{ + struct drm_device *dev = obj->base.dev; + struct drm_gem_object *target_obj; + struct drm_i915_gem_object *target_i915_obj; + uint32_t target_offset; + int ret = -EINVAL; + + /* we've already hold a reference to all valid objects */ + target_obj = &eb_get_object(eb, reloc->target_handle)->base; + if (unlikely(target_obj == NULL)) + return -ENOENT; + + target_i915_obj = to_intel_bo(target_obj); + target_offset = target_i915_obj->gtt_offset; + + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. */ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + + /* Validate that the target is in a valid r/w GPU domain */ + if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { + DRM_DEBUG("reloc with multiple write domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return ret; + } + if (unlikely((reloc->write_domain | reloc->read_domains) + & ~I915_GEM_GPU_DOMAINS)) { + DRM_DEBUG("reloc with read/write non-GPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return ret; + } + + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; + + /* If the relocation already has the right value in it, no + * more work needs to be done. 
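eb_create() above chooses between two lookup structures. With I915_EXEC_HANDLE_LUT userspace promises to reference each buffer by its index in the exec list, so a flat array indexed by position is enough and eb->and stores the negative buffer count as the mode tag; otherwise handles are hashed into a power-of-two number of buckets and eb->and holds the bucket mask. As a worked example of the bucket sizing, assuming 4 KiB pages and an 8-byte struct hlist_head, the count starts at 4096 / 8 / 2 = 256 and is halved while it exceeds twice the buffer count, so an execbuffer with 10 buffers ends up with 16 buckets and eb->and == 15.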
+ */ + if (target_offset == reloc->presumed_offset) + return 0; + + /* Check that the relocation address is valid... */ + if (unlikely(reloc->offset > obj->base.size - 4)) { + DRM_DEBUG("Relocation beyond object bounds: " + "obj %p target %d offset %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->offset, + (int) obj->base.size); + return ret; + } + if (unlikely(reloc->offset & 3)) { + DRM_DEBUG("Relocation not 4-byte aligned: " + "obj %p target %d offset %d.\n", + obj, reloc->target_handle, + (int) reloc->offset); + return ret; + } + + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && in_atomic()) + return -EFAULT; + + reloc->delta += target_offset; + if (use_cpu_reloc(obj)) { + uint32_t page_offset = reloc->offset & ~PAGE_MASK; + char *vaddr; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + } else { + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + + /* Map the page containing the relocation we're going to perform. */ + reloc->offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc->offset & ~PAGE_MASK)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + } + + /* and update the user's relocation entry */ + reloc->presumed_offset = target_offset; + + return 0; +} + +static int +i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, + struct eb_objects *eb) +{ +#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) + struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; + struct drm_i915_gem_relocation_entry __user *user_relocs; + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + int remain, ret; + + user_relocs = to_user_ptr(entry->relocs_ptr); + + remain = entry->relocation_count; + while (remain) { + struct drm_i915_gem_relocation_entry *r = stack_reloc; + int count = remain; + if (count > ARRAY_SIZE(stack_reloc)) + count = ARRAY_SIZE(stack_reloc); + remain -= count; + + if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) + return -EFAULT; + + do { + u64 offset = r->presumed_offset; + + ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + if (ret) + return ret; + + if (r->presumed_offset != offset && + __copy_to_user_inatomic(&user_relocs->presumed_offset, + &r->presumed_offset, + sizeof(r->presumed_offset))) { + return -EFAULT; + } + + user_relocs++; + r++; + } while (--count); + } + + return 0; +#undef N_RELOC +} + +static int +i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, + struct eb_objects *eb, + struct drm_i915_gem_relocation_entry *relocs) +{ + const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + int i, ret; + + for (i = 0; i < entry->relocation_count; i++) { + ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_relocate(struct eb_objects *eb) +{ + struct drm_i915_gem_object *obj; + int ret = 0; + + /* This is the fast path and we cannot handle a pagefault whilst + * holding the 
struct mutex lest the user pass in the relocations + * contained within a mmaped bo. For in such a case we, the page + * fault handler would call i915_gem_fault() and we would try to + * acquire the struct mutex again. Obviously this is bad and so + * lockdep complains vehemently. + */ + pagefault_disable(); + list_for_each_entry(obj, &eb->objects, exec_list) { + ret = i915_gem_execbuffer_relocate_object(obj, eb); + if (ret) + break; + } + pagefault_enable(); + + return ret; +} + +#define __EXEC_OBJECT_HAS_PIN (1<<31) +#define __EXEC_OBJECT_HAS_FENCE (1<<30) + +static int +need_reloc_mappable(struct drm_i915_gem_object *obj) +{ + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + return entry->relocation_count && !use_cpu_reloc(obj); +} + +static int +i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + bool *need_reloc) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; + bool need_fence, need_mappable; + int ret; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(obj); + + ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); + if (ret) + return ret; + + entry->flags |= __EXEC_OBJECT_HAS_PIN; + + if (has_fenced_gpu_access) { + if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { + ret = i915_gem_object_get_fence(obj); + if (ret) + return ret; + + if (i915_gem_object_pin_fence(obj)) + entry->flags |= __EXEC_OBJECT_HAS_FENCE; + + obj->pending_fenced_gpu_access = true; + } + } + + /* Ensure ppgtt mapping exists if needed */ + if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { + i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, + obj, obj->cache_level); + + obj->has_aliasing_ppgtt_mapping = 1; + } + + if (entry->offset != obj->gtt_offset) { + entry->offset = obj->gtt_offset; + *need_reloc = true; + } + + if (entry->flags & EXEC_OBJECT_WRITE) { + obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; + obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; + } + + if (entry->flags & EXEC_OBJECT_NEEDS_GTT && + !obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, obj->cache_level); + + return 0; +} + +static void +i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) +{ + struct drm_i915_gem_exec_object2 *entry; + + if (!obj->gtt_space) + return; + + entry = obj->exec_entry; + + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) + i915_gem_object_unpin_fence(obj); + + if (entry->flags & __EXEC_OBJECT_HAS_PIN) + i915_gem_object_unpin(obj); + + entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); +} + +static int +i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, + struct list_head *objects, + bool *need_relocs) +{ + struct drm_i915_gem_object *obj; + struct list_head ordered_objects; + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; + int retry; + + INIT_LIST_HEAD(&ordered_objects); + while (!list_empty(objects)) { + struct drm_i915_gem_exec_object2 *entry; + bool need_fence, need_mappable; + + obj = list_first_entry(objects, + struct drm_i915_gem_object, + exec_list); + entry = obj->exec_entry; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(obj); 
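i915_gem_execbuffer_relocate() above is the fast path: it walks the relocations with page faults disabled while struct_mutex is held, so anything that would need to fault in user memory (or wait on an active object) returns -EFAULT instead of deadlocking. The intended division of labour with i915_gem_execbuffer_relocate_slow(), defined further down, is roughly the following; the actual fallback is wired up in i915_gem_do_execbuffer() at the end of this file:

    ret = i915_gem_execbuffer_relocate(eb);        /* runs with faults disabled */
    if (ret == -EFAULT)
            /* drop struct_mutex, copy every relocation array into kernel
             * memory, re-reserve the objects and relocate from the copy */
            ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
                                                    ring, eb, exec);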
+ + if (need_mappable) + list_move(&obj->exec_list, &ordered_objects); + else + list_move_tail(&obj->exec_list, &ordered_objects); + + obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; + obj->base.pending_write_domain = 0; + obj->pending_fenced_gpu_access = false; + } + list_splice(&ordered_objects, objects); + + /* Attempt to pin all of the buffers into the GTT. + * This is done in 3 phases: + * + * 1a. Unbind all objects that do not match the GTT constraints for + * the execbuffer (fenceable, mappable, alignment etc). + * 1b. Increment pin count for already bound objects. + * 2. Bind new objects. + * 3. Decrement pin count. + * + * This avoid unnecessary unbinding of later objects in order to make + * room for the earlier objects *unless* we need to defragment. + */ + retry = 0; + do { + int ret = 0; + + /* Unbind any ill-fitting objects or pin. */ + list_for_each_entry(obj, objects, exec_list) { + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + bool need_fence, need_mappable; + + if (!obj->gtt_space) + continue; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(obj); + + if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + (need_mappable && !obj->map_and_fenceable)) + ret = i915_gem_object_unbind(obj); + else + ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + if (ret) + goto err; + } + + /* Bind fresh objects */ + list_for_each_entry(obj, objects, exec_list) { + if (obj->gtt_space) + continue; + + ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + if (ret) + goto err; + } + +err: /* Decrement pin count for bound objects */ + list_for_each_entry(obj, objects, exec_list) + i915_gem_execbuffer_unreserve_object(obj); + + if (ret != -ENOSPC || retry++) + return ret; + + ret = i915_gem_evict_everything(ring->dev); + if (ret) + return ret; + } while (1); +} + +static int +i915_gem_execbuffer_relocate_slow(struct drm_device *dev, + struct drm_i915_gem_execbuffer2 *args, + struct drm_file *file, + struct intel_ring_buffer *ring, + struct eb_objects *eb, + struct drm_i915_gem_exec_object2 *exec) +{ + struct drm_i915_gem_relocation_entry *reloc; + struct drm_i915_gem_object *obj; + bool need_relocs; + int *reloc_offset; + int i, total, ret; + int count = args->buffer_count; + + /* We may process another execbuffer during the unlock... 
*/ + while (!list_empty(&eb->objects)) { + obj = list_first_entry(&eb->objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + + mutex_unlock(&dev->struct_mutex); + + total = 0; + for (i = 0; i < count; i++) + total += exec[i].relocation_count; + + reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset)); + reloc = drm_malloc_ab(total, sizeof(*reloc)); + if (reloc == NULL || reloc_offset == NULL) { + drm_free_large(reloc); + drm_free_large(reloc_offset); + mutex_lock(&dev->struct_mutex); + return -ENOMEM; + } + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + u64 invalid_offset = (u64)-1; + int j; + + user_relocs = to_user_ptr(exec[i].relocs_ptr); + + if (copy_from_user(reloc+total, user_relocs, + exec[i].relocation_count * sizeof(*reloc))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + + /* As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. + */ + for (j = 0; j < exec[i].relocation_count; j++) { + if (copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + } + + reloc_offset[i] = total; + total += exec[i].relocation_count; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto err; + } + + /* reacquire the objects */ + eb_reset(eb); + ret = eb_lookup_objects(eb, exec, args, file); + if (ret) + goto err; + + need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + if (ret) + goto err; + + list_for_each_entry(obj, &eb->objects, exec_list) { + int offset = obj->exec_entry - exec; + ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, + reloc + reloc_offset[offset]); + if (ret) + goto err; + } + + /* Leave the user relocations as are, this is the painfully slow path, + * and we want to avoid the complication of dropping the lock whilst + * having buffers reserved in the aperture and so causing spurious + * ENOSPC for random operations. + */ + +err: + drm_free_large(reloc); + drm_free_large(reloc_offset); + return ret; +} + +static int +i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, + struct list_head *objects) +{ + struct drm_i915_gem_object *obj; + uint32_t flush_domains = 0; + int ret; + + list_for_each_entry(obj, objects, exec_list) { + ret = i915_gem_object_sync(obj, ring); + if (ret) + return ret; + + if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) + i915_gem_clflush_object(obj); + + flush_domains |= obj->base.write_domain; + } + + if (flush_domains & I915_GEM_DOMAIN_CPU) + i915_gem_chipset_flush(ring->dev); + + if (flush_domains & I915_GEM_DOMAIN_GTT) + wmb(); + + /* Unconditionally invalidate gpu caches and ensure that we do flush + * any residual writes from the previous batch. 
+ */ + return intel_ring_invalidate_all_caches(ring); +} + +static bool +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) +{ + if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) + return false; + + return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; +} + +static int +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) +{ + int i; + int relocs_total = 0; + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); + + for (i = 0; i < count; i++) { + char __user *ptr = to_user_ptr(exec[i].relocs_ptr); + int length; /* limited by fault_in_pages_readable() */ + + if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) + return -EINVAL; + + /* First check for malicious input causing overflow in + * the worst case where we need to allocate the entire + * relocation tree as a single array. + */ + if (exec[i].relocation_count > relocs_max - relocs_total) + return -EINVAL; + relocs_total += exec[i].relocation_count; + + length = exec[i].relocation_count * + sizeof(struct drm_i915_gem_relocation_entry); + /* + * We must check that the entire relocation array is safe + * to read, but since we may need to update the presumed + * offsets during execution, check for full write access. + */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + + if (fault_in_multipages_readable(ptr, length)) + return -EFAULT; + } + + return 0; +} + +static void +i915_gem_execbuffer_move_to_active(struct list_head *objects, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, objects, exec_list) { + u32 old_read = obj->base.read_domains; + u32 old_write = obj->base.write_domain; + + obj->base.write_domain = obj->base.pending_write_domain; + if (obj->base.write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; + obj->base.read_domains = obj->base.pending_read_domains; + obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + + i915_gem_object_move_to_active(obj, ring); + if (obj->base.write_domain) { + obj->dirty = 1; + obj->last_write_seqno = intel_ring_get_seqno(ring); + if (obj->pin_count) /* check for potential scanout */ + intel_mark_fb_busy(obj); + } + + trace_i915_gem_object_change_domain(obj, old_read, old_write); + } +} + +static void +i915_gem_execbuffer_retire_commands(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring) +{ + /* Unconditionally force add_request to emit a full flush. 
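validate_exec_list() above guards against an integer overflow when the per-buffer relocation counts are later summed and multiplied by the entry size. A stand-alone sketch of that pattern; ENTRY_SIZE is a made-up stand-in for sizeof(struct drm_i915_gem_relocation_entry).

#include <limits.h>

#define ENTRY_SIZE 32   /* hypothetical per-relocation allocation size */

/* Accumulate untrusted counts without overflowing: test each increment
 * against the remaining headroom instead of adding first and checking later. */
static int total_relocations(const unsigned int *counts, int n, int *out_total)
{
        const int max = INT_MAX / ENTRY_SIZE;
        int total = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (counts[i] > (unsigned int)(max - total))
                        return -1;      /* would overflow the later allocation */
                total += counts[i];
        }

        *out_total = total;
        return 0;
}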
*/ + ring->gpu_caches_dirty = true; + + /* Add a breadcrumb for the completion of the batch buffer */ + (void)i915_add_request(ring, file, NULL); +} + +static int +i915_reset_gen7_sol_offsets(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret, i; + + if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) + return 0; + + ret = intel_ring_begin(ring, 4 * 3); + if (ret) + return ret; + + for (i = 0; i < 4; i++) { + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); + intel_ring_emit(ring, 0); + } + + intel_ring_advance(ring); + + return 0; +} + +static int +i915_gem_do_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct drm_i915_gem_exec_object2 *exec) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct eb_objects *eb; + struct drm_i915_gem_object *batch_obj; + struct drm_clip_rect *cliprects = NULL; + struct intel_ring_buffer *ring; + u32 ctx_id = i915_execbuffer2_get_context_id(*args); + u32 exec_start, exec_len; + u32 mask, flags; + int ret, mode, i; + bool need_relocs; + + if (!i915_gem_check_execbuffer(args)) + return -EINVAL; + + ret = validate_exec_list(exec, args->buffer_count); + if (ret) + return ret; + + flags = 0; + if (args->flags & I915_EXEC_SECURE) { + if (!file->is_master || !capable(CAP_SYS_ADMIN)) + return -EPERM; + + flags |= I915_DISPATCH_SECURE; + } + if (args->flags & I915_EXEC_IS_PINNED) + flags |= I915_DISPATCH_PINNED; + + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->ring[RCS]; + break; + case I915_EXEC_BSD: + ring = &dev_priv->ring[VCS]; + if (ctx_id != 0) { + DRM_DEBUG("Ring %s doesn't support contexts\n", + ring->name); + return -EPERM; + } + break; + case I915_EXEC_BLT: + ring = &dev_priv->ring[BCS]; + if (ctx_id != 0) { + DRM_DEBUG("Ring %s doesn't support contexts\n", + ring->name); + return -EPERM; + } + break; + default: + DRM_DEBUG("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; + } + if (!intel_ring_initialized(ring)) { + DRM_DEBUG("execbuf with invalid ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; + } + + mode = args->flags & I915_EXEC_CONSTANTS_MASK; + mask = I915_EXEC_CONSTANTS_MASK; + switch (mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (ring == &dev_priv->ring[RCS] && + mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) + return -EINVAL; + + if (INTEL_INFO(dev)->gen > 5 && + mode == I915_EXEC_CONSTANTS_REL_SURFACE) + return -EINVAL; + + /* The HW changed the meaning on this bit on gen6 */ + if (INTEL_INFO(dev)->gen >= 6) + mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; + } + break; + default: + DRM_DEBUG("execbuf with unknown constants: %d\n", mode); + return -EINVAL; + } + + if (args->buffer_count < 1) { + DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + if (args->num_cliprects != 0) { + if (ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("clip rectangles are only valid with the render ring\n"); + return -EINVAL; + } + + if (INTEL_INFO(dev)->gen >= 5) { + DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + return -EINVAL; + } + + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return 
-EINVAL; + } + + cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), + GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto pre_mutex_err; + } + + if (copy_from_user(cliprects, + to_user_ptr(args->cliprects_ptr), + sizeof(*cliprects)*args->num_cliprects)) { + ret = -EFAULT; + goto pre_mutex_err; + } + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto pre_mutex_err; + + if (dev_priv->mm.suspended) { + mutex_unlock(&dev->struct_mutex); + ret = -EBUSY; + goto pre_mutex_err; + } + + eb = eb_create(args); + if (eb == NULL) { + mutex_unlock(&dev->struct_mutex); + ret = -ENOMEM; + goto pre_mutex_err; + } + + /* Look up object handles */ + ret = eb_lookup_objects(eb, exec, args, file); + if (ret) + goto err; + + /* take note of the batch buffer before we might reorder the lists */ + batch_obj = list_entry(eb->objects.prev, + struct drm_i915_gem_object, + exec_list); + + /* Move the objects en-masse into the GTT, evicting if necessary. */ + need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + if (ret) + goto err; + + /* The objects are in their final locations, apply the relocations. */ + if (need_relocs) + ret = i915_gem_execbuffer_relocate(eb); + if (ret) { + if (ret == -EFAULT) { + ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, + eb, exec); + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + } + if (ret) + goto err; + } + + /* Set the pending read domains for the batch buffer to COMMAND */ + if (batch_obj->base.pending_write_domain) { + DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); + ret = -EINVAL; + goto err; + } + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + + /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure + * batch" bit. Hence we need to pin secure batches into the global gtt. + * hsw should have this fixed, but let's be paranoid and do it + * unconditionally for now. 
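The relocate/relocate_slow split above is the usual fast-path/slow-path pattern: fix up relocations while holding the lock, and only if that faults on user memory fall back to a path that may drop the lock and copy the relocations in first. A sketch with hypothetical helper names:

#include <errno.h>

int relocate_fast(void);              /* stand-in: fails with -EFAULT if user pages are not resident */
int relocate_slow_with_copies(void);  /* stand-in: copies relocations with the lock dropped, then retries */

int apply_relocations(void)
{
        int ret = relocate_fast();

        /* Only -EFAULT triggers the slow path; any other error is final. */
        if (ret == -EFAULT)
                ret = relocate_slow_with_copies();

        return ret;
}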
*/ + if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); + + ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); + if (ret) + goto err; + + ret = i915_switch_context(ring, file, ctx_id); + if (ret) + goto err; + + if (ring == &dev_priv->ring[RCS] && + mode != dev_priv->relative_constants_mode) { + ret = intel_ring_begin(ring, 4); + if (ret) + goto err; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, mask << 16 | mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = mode; + } + + if (args->flags & I915_EXEC_GEN7_SOL_RESET) { + ret = i915_reset_gen7_sol_offsets(dev, ring); + if (ret) + goto err; + } + + exec_start = batch_obj->gtt_offset + args->batch_start_offset; + exec_len = args->batch_len; + if (cliprects) { + for (i = 0; i < args->num_cliprects; i++) { + ret = i915_emit_box(dev, &cliprects[i], + args->DR1, args->DR4); + if (ret) + goto err; + + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + goto err; + } + } else { + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + goto err; + } + + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); + + i915_gem_execbuffer_move_to_active(&eb->objects, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring); + +err: + eb_destroy(eb); + + mutex_unlock(&dev->struct_mutex); + +pre_mutex_err: + kfree(cliprects); + return ret; +} + +/* + * Legacy execbuffer just creates an exec2 list from the original exec object + * list array and passes it to the real function. + */ +int +i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_execbuffer2 exec2; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret, i; + + if (args->buffer_count < 1) { + DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + /* Copy in the exec list from userland */ + exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec_list == NULL || exec2_list == NULL) { + DRM_DEBUG("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -ENOMEM; + } + ret = copy_from_user(exec_list, + to_user_ptr(args->buffers_ptr), + sizeof(*exec_list) * args->buffer_count); + if (ret != 0) { + DRM_DEBUG("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -EFAULT; + } + + for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].handle = exec_list[i].handle; + exec2_list[i].relocation_count = exec_list[i].relocation_count; + exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; + exec2_list[i].alignment = exec_list[i].alignment; + exec2_list[i].offset = exec_list[i].offset; + if (INTEL_INFO(dev)->gen < 4) + exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; + else + exec2_list[i].flags = 0; + } + + exec2.buffers_ptr = args->buffers_ptr; + exec2.buffer_count = args->buffer_count; + exec2.batch_start_offset = args->batch_start_offset; + exec2.batch_len = args->batch_len; + exec2.DR1 = args->DR1; + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + 
exec2.cliprects_ptr = args->cliprects_ptr; + exec2.flags = I915_EXEC_RENDER; + i915_execbuffer2_set_context_id(exec2, 0); + + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + for (i = 0; i < args->buffer_count; i++) + exec_list[i].offset = exec2_list[i].offset; + /* ... and back out to userspace */ + ret = copy_to_user(to_user_ptr(args->buffers_ptr), + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec_list); + drm_free_large(exec2_list); + return ret; +} + +int +i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret; + + if (args->buffer_count < 1 || + args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { + DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, + GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + if (exec2_list == NULL) + exec2_list = drm_malloc_ab(sizeof(*exec2_list), + args->buffer_count); + if (exec2_list == NULL) { + DRM_DEBUG("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + return -ENOMEM; + } + ret = copy_from_user(exec2_list, + to_user_ptr(args->buffers_ptr), + sizeof(*exec2_list) * args->buffer_count); + if (ret != 0) { + DRM_DEBUG("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec2_list); + return -EFAULT; + } + + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user(to_user_ptr(args->buffers_ptr), + exec2_list, + sizeof(*exec2_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec2_list); + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c new file mode 100644 index 0000000..bdb0d77 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -0,0 +1,830 @@ +/* + * Copyright © 2010 Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +typedef uint32_t gen6_gtt_pte_t; + +/* PPGTT stuff */ +#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) + +#define GEN6_PDE_VALID (1 << 0) +/* gen6+ has bit 11-4 for physical addr bit 39-32 */ +#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + +#define GEN6_PTE_VALID (1 << 0) +#define GEN6_PTE_UNCACHED (1 << 1) +#define HSW_PTE_UNCACHED (0) +#define GEN6_PTE_CACHE_LLC (2 << 1) +#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) +#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + +static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, + dma_addr_t addr, + enum i915_cache_level level) +{ + gen6_gtt_pte_t pte = GEN6_PTE_VALID; + pte |= GEN6_PTE_ADDR_ENCODE(addr); + + switch (level) { + case I915_CACHE_LLC_MLC: + /* Haswell doesn't set L3 this way */ + if (IS_HASWELL(dev)) + pte |= GEN6_PTE_CACHE_LLC; + else + pte |= GEN6_PTE_CACHE_LLC_MLC; + break; + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + if (IS_HASWELL(dev)) + pte |= HSW_PTE_UNCACHED; + else + pte |= GEN6_PTE_UNCACHED; + break; + default: + BUG(); + } + + return pte; +} + +static int gen6_ppgtt_enable(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t pd_offset; + struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + gen6_gtt_pte_t __iomem *pd_addr; + uint32_t pd_entry; + int i; + + pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + + ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + + pt_addr = ppgtt->pt_dma_addr[i]; + pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); + pd_entry |= GEN6_PDE_VALID; + + writel(pd_entry, pd_addr + i); + } + readl(pd_addr); + + pd_offset = ppgtt->pd_offset; + pd_offset /= 64; /* in cachelines, */ + pd_offset <<= 16; + + if (INTEL_INFO(dev)->gen == 6) { + uint32_t ecochk, gab_ctl, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | + ECOBITS_PPGTT_CACHE64B); + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); + I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | + ECOCHK_PPGTT_CACHE64B); + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } else if (INTEL_INFO(dev)->gen >= 7) { + uint32_t ecochk, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + + ecochk = I915_READ(GAM_ECOCHK); + if (IS_HASWELL(dev)) { + ecochk |= ECOCHK_PPGTT_WB_HSW; + } else { + ecochk |= ECOCHK_PPGTT_LLC_IVB; + ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; + } + I915_WRITE(GAM_ECOCHK, ecochk); + /* GFX_MODE is per-ring on gen7+ */ + } + + for_each_ring(ring, dev_priv, i) { + if (INTEL_INFO(dev)->gen >= 7) + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); + } + return 0; +} + +/* PPGTT support for Sandybdrige/Gen6 and later */ +static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, + unsigned first_entry, + unsigned num_entries) +{ + gen6_gtt_pte_t *pt_vaddr, scratch_pte; + unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; + unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; + unsigned last_pte, i; + + scratch_pte = gen6_pte_encode(ppgtt->dev, + 
ppgtt->scratch_page_dma_addr, + I915_CACHE_LLC); + + while (num_entries) { + last_pte = first_pte + num_entries; + if (last_pte > I915_PPGTT_PT_ENTRIES) + last_pte = I915_PPGTT_PT_ENTRIES; + + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); + + for (i = first_pte; i < last_pte; i++) + pt_vaddr[i] = scratch_pte; + + kunmap_atomic(pt_vaddr); + + num_entries -= last_pte - first_pte; + first_pte = 0; + act_pt++; + } +} + +static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, + struct sg_table *pages, + unsigned first_entry, + enum i915_cache_level cache_level) +{ + gen6_gtt_pte_t *pt_vaddr; + unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; + unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; + struct sg_page_iter sg_iter; + + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); + for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + dma_addr_t page_addr; + + page_addr = sg_page_iter_dma_address(&sg_iter); + pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr, + cache_level); + if (++act_pte == I915_PPGTT_PT_ENTRIES) { + kunmap_atomic(pt_vaddr); + act_pt++; + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); + act_pte = 0; + + } + } + kunmap_atomic(pt_vaddr); +} + +static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) +{ + int i; + + if (ppgtt->pt_dma_addr) { + for (i = 0; i < ppgtt->num_pd_entries; i++) + pci_unmap_page(ppgtt->dev->pdev, + ppgtt->pt_dma_addr[i], + 4096, PCI_DMA_BIDIRECTIONAL); + } + + kfree(ppgtt->pt_dma_addr); + for (i = 0; i < ppgtt->num_pd_entries; i++) + __free_page(ppgtt->pt_pages[i]); + kfree(ppgtt->pt_pages); + kfree(ppgtt); +} + +static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +{ + struct drm_device *dev = ppgtt->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned first_pd_entry_in_global_pt; + int i; + int ret = -ENOMEM; + + /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 + * entries. For aliasing ppgtt support we just steal them at the end for + * now. 
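The act_pt/act_pte arithmetic in the clear and insert paths above simply walks a linear GTT page index across fixed-size page tables. The following stand-alone example (first_entry and num_entries are arbitrary demonstration values) prints which table and which slots a range touches:

#include <stdio.h>

#define PT_ENTRIES 1024   /* mirrors I915_PPGTT_PT_ENTRIES: one 4K page of 4-byte PTEs */

int main(void)
{
        unsigned first_entry = 1500;    /* example: first GTT page index in the range */
        unsigned num_entries = 2000;    /* example: number of pages in the range */

        unsigned pt  = first_entry / PT_ENTRIES;   /* which page table */
        unsigned pte = first_entry % PT_ENTRIES;   /* slot within that table */

        while (num_entries) {
                unsigned last = pte + num_entries;
                if (last > PT_ENTRIES)
                        last = PT_ENTRIES;

                printf("page table %u: slots %u..%u\n", pt, pte, last - 1);

                num_entries -= last - pte;
                pte = 0;
                pt++;
        }
        return 0;
}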
*/ + first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); + + ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; + ppgtt->enable = gen6_ppgtt_enable; + ppgtt->clear_range = gen6_ppgtt_clear_range; + ppgtt->insert_entries = gen6_ppgtt_insert_entries; + ppgtt->cleanup = gen6_ppgtt_cleanup; + ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, + GFP_KERNEL); + if (!ppgtt->pt_pages) + return -ENOMEM; + + for (i = 0; i < ppgtt->num_pd_entries; i++) { + ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); + if (!ppgtt->pt_pages[i]) + goto err_pt_alloc; + } + + ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, + GFP_KERNEL); + if (!ppgtt->pt_dma_addr) + goto err_pt_alloc; + + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + + pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, + PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(dev->pdev, pt_addr)) { + ret = -EIO; + goto err_pd_pin; + + } + ppgtt->pt_dma_addr[i] = pt_addr; + } + + ppgtt->clear_range(ppgtt, 0, + ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); + + ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); + + return 0; + +err_pd_pin: + if (ppgtt->pt_dma_addr) { + for (i--; i >= 0; i--) + pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], + 4096, PCI_DMA_BIDIRECTIONAL); + } +err_pt_alloc: + kfree(ppgtt->pt_dma_addr); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + if (ppgtt->pt_pages[i]) + __free_page(ppgtt->pt_pages[i]); + } + kfree(ppgtt->pt_pages); + + return ret; +} + +static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_ppgtt *ppgtt; + int ret; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return -ENOMEM; + + ppgtt->dev = dev; + ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; + + if (INTEL_INFO(dev)->gen < 8) + ret = gen6_ppgtt_init(ppgtt); + else + BUG(); + + if (ret) + kfree(ppgtt); + else + dev_priv->mm.aliasing_ppgtt = ppgtt; + + return ret; +} + +void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + + if (!ppgtt) + return; + + ppgtt->cleanup(ppgtt); + dev_priv->mm.aliasing_ppgtt = NULL; +} + +void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + ppgtt->insert_entries(ppgtt, obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + cache_level); +} + +void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_gem_object *obj) +{ + ppgtt->clear_range(ppgtt, + obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); +} + +extern int intel_iommu_gfx_mapped; +/* Certain Gen5 chipsets require require idling the GPU before + * unmapping anything from the GTT when VT-d is enabled. + */ +static inline bool needs_idle_maps(struct drm_device *dev) +{ +#ifdef CONFIG_INTEL_IOMMU + /* Query intel_iommu to see if we need the workaround. Presumably that + * was loaded first. 
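Some quick arithmetic behind gen6_ppgtt_init() above, assuming the values these constants had at the time (512 page-directory entries, 1024 PTEs per table): the aliasing PPGTT spans 2 GiB, and the directory entries themselves are carved out of the global GTT table. This is an illustrative sketch, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t pd_entries = 512;    /* I915_PPGTT_PD_ENTRIES          */
        const uint64_t pt_entries = 1024;   /* I915_PPGTT_PT_ENTRIES (4K/4B)  */
        const uint64_t page_size  = 4096;

        /* One page table per PDE, one 4 KiB page per PTE. */
        printf("ppgtt address space: %llu MiB\n",
               (unsigned long long)((pd_entries * pt_entries * page_size) >> 20));

        /* The PDEs occupy 512 slots of the global GTT table; those slots would
         * otherwise have mapped this much aperture, which is why the same
         * amount is subtracted from gtt_size when the global GTT is set up. */
        printf("aperture reserved for PDEs: %llu KiB\n",
               (unsigned long long)((pd_entries * page_size) >> 10));
        return 0;
}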
+ */ + if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) + return true; +#endif + return false; +} + +static bool do_idling(struct drm_i915_private *dev_priv) +{ + bool ret = dev_priv->mm.interruptible; + + if (unlikely(dev_priv->gtt.do_idle_maps)) { + dev_priv->mm.interruptible = false; + if (i915_gpu_idle(dev_priv->dev)) { + DRM_ERROR("Couldn't idle GPU\n"); + /* Wait a bit, in hopes it avoids the hang */ + udelay(10); + } + } + + return ret; +} + +static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) +{ + if (unlikely(dev_priv->gtt.do_idle_maps)) + dev_priv->mm.interruptible = interruptible; +} + +void i915_gem_restore_gtt_mappings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + + /* First fill our portion of the GTT with scratch pages */ + dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, + dev_priv->gtt.total / PAGE_SIZE); + + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + i915_gem_clflush_object(obj); + i915_gem_gtt_bind_object(obj, obj->cache_level); + } + + i915_gem_chipset_flush(dev); +} + +int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) +{ + if (obj->has_dma_mapping) + return 0; + + if (!dma_map_sg(&obj->base.dev->pdev->dev, + obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL)) + return -ENOSPC; + + return 0; +} + +/* + * Binds an object into the global gtt with the specified cache level. The object + * will be accessible to the GPU via commands whose operands reference offsets + * within the global GTT as well as accessible by the GPU through the GMADR + * mapped BAR (dev_priv->mm.gtt->gtt). + */ +static void gen6_ggtt_insert_entries(struct drm_device *dev, + struct sg_table *st, + unsigned int first_entry, + enum i915_cache_level level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + gen6_gtt_pte_t __iomem *gtt_entries = + (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + int i = 0; + struct sg_page_iter sg_iter; + dma_addr_t addr; + + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { + addr = sg_page_iter_dma_address(&sg_iter); + iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]); + i++; + } + + /* XXX: This serves as a posting read to make sure that the PTE has + * actually been updated. There is some concern that even though + * registers and PTEs are within the same BAR that they are potentially + * of NUMA access patterns. Therefore, even with the way we assume + * hardware should work, we must keep this posting read for paranoia. + */ + if (i != 0) + WARN_ON(readl(&gtt_entries[i-1]) + != gen6_pte_encode(dev, addr, level)); + + /* This next bit makes the above posting read even more important. We + * want to flush the TLBs only after we're certain all the PTE updates + * have finished.
+ */ + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); +} + +static void gen6_ggtt_clear_range(struct drm_device *dev, + unsigned int first_entry, + unsigned int num_entries) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = + (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma, + I915_CACHE_LLC); + for (i = 0; i < num_entries; i++) + iowrite32(scratch_pte, &gtt_base[i]); + readl(gtt_base); +} + + +static void i915_ggtt_insert_entries(struct drm_device *dev, + struct sg_table *st, + unsigned int pg_start, + enum i915_cache_level cache_level) +{ + unsigned int flags = (cache_level == I915_CACHE_NONE) ? + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + + intel_gtt_insert_sg_entries(st, pg_start, flags); + +} + +static void i915_ggtt_clear_range(struct drm_device *dev, + unsigned int first_entry, + unsigned int num_entries) +{ + intel_gtt_clear_range(first_entry, num_entries); +} + + +void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->gtt.gtt_insert_entries(dev, obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + cache_level); + + obj->has_global_gtt_mapping = 1; +} + +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->gtt.gtt_clear_range(obj->base.dev, + obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); + + obj->has_global_gtt_mapping = 0; +} + +void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool interruptible; + + interruptible = do_idling(dev_priv); + + if (!obj->has_dma_mapping) + dma_unmap_sg(&dev->pdev->dev, + obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL); + + undo_idling(dev_priv, interruptible); +} + +static void i915_gtt_color_adjust(struct drm_mm_node *node, + unsigned long color, + unsigned long *start, + unsigned long *end) +{ + if (node->color != color) + *start += 4096; + + if (!list_empty(&node->node_list)) { + node = list_entry(node->node_list.next, + struct drm_mm_node, + node_list); + if (node->allocated && node->color != color) + *end -= 4096; + } +} +void i915_gem_setup_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) +{ + /* Let GEM Manage all of the aperture. + * + * However, leave one page at the end still bound to the scratch page. + * There are a number of places where the hardware apparently prefetches + * past the end of the object, and we've seen multiple hangs with the + * GPU head pointer stuck in a batchbuffer bound at the last page of the + * aperture. One page should be enough to keep any prefetching inside + * of the aperture. + */ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_mm_node *entry; + struct drm_i915_gem_object *obj; + unsigned long hole_start, hole_end; + + BUG_ON(mappable_end > end); + + /* Subtract the guard page ...
*/ + drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); + if (!HAS_LLC(dev)) + dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; + + /* Mark any preallocated objects as occupied */ + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", + obj->gtt_offset, obj->base.size); + + BUG_ON(obj->gtt_space != I915_GTT_RESERVED); + obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, + obj->gtt_offset, + obj->base.size, + false); + obj->has_global_gtt_mapping = 1; + } + + dev_priv->gtt.start = start; + dev_priv->gtt.total = end - start; + + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, + hole_start, hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); + dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, + (hole_end-hole_start) / PAGE_SIZE); + } + + /* And finally clear the reserved guard page */ + dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); +} + +static bool +intel_enable_ppgtt(struct drm_device *dev) +{ + if (i915_enable_ppgtt >= 0) + return i915_enable_ppgtt; + +#ifdef CONFIG_INTEL_IOMMU + /* Disable ppgtt on SNB if VT-d is on. */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + return false; +#endif + + return true; +} + +void i915_gem_init_global_gtt(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long gtt_size, mappable_size; + + gtt_size = dev_priv->gtt.total; + mappable_size = dev_priv->gtt.mappable_end; + + if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { + int ret; + + if (INTEL_INFO(dev)->gen <= 7) { + /* PPGTT pdes are stolen from global gtt ptes, so shrink the + * aperture accordingly when using aliasing ppgtt. 
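The cache-coloring callback above keeps one scratch page between neighbouring objects of different cache levels, on top of the single guard page reserved at the very end of the aperture. A condensed restatement; the function and parameter names here are invented for the sketch.

#include <stdbool.h>

#define GUARD_PAGE 4096UL

/* Shrink a candidate hole so that an object of 'want_color' never ends up
 * directly adjacent to a node of a different color, mirroring
 * i915_gtt_color_adjust() above. */
static void adjust_hole(unsigned long prev_color, bool next_allocated,
                        unsigned long next_color, unsigned long want_color,
                        unsigned long *start, unsigned long *end)
{
        if (prev_color != want_color)
                *start += GUARD_PAGE;

        if (next_allocated && next_color != want_color)
                *end -= GUARD_PAGE;
}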
*/ + gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + } + + i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); + + ret = i915_gem_init_aliasing_ppgtt(dev); + if (!ret) + return; + + DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); + drm_mm_takedown(&dev_priv->mm.gtt_space); + gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + } + i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); +} + +static int setup_scratch_page(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct page *page; + dma_addr_t dma_addr; + + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); + if (page == NULL) + return -ENOMEM; + get_page(page); + set_pages_uc(page, 1); + +#ifdef CONFIG_INTEL_IOMMU + dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, dma_addr)) + return -EINVAL; +#else + dma_addr = page_to_phys(page); +#endif + dev_priv->gtt.scratch_page = page; + dev_priv->gtt.scratch_page_dma = dma_addr; + + return 0; +} + +static void teardown_scratch_page(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + set_pages_wb(dev_priv->gtt.scratch_page, 1); + pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + put_page(dev_priv->gtt.scratch_page); + __free_page(dev_priv->gtt.scratch_page); +} + +static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) +{ + snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; + snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; + return snb_gmch_ctl << 20; +} + +static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) +{ + snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; + snb_gmch_ctl &= SNB_GMCH_GMS_MASK; + return snb_gmch_ctl << 25; /* 32 MB units */ +} + +static int gen6_gmch_probe(struct drm_device *dev, + size_t *gtt_total, + size_t *stolen, + phys_addr_t *mappable_base, + unsigned long *mappable_end) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + phys_addr_t gtt_bus_addr; + unsigned int gtt_size; + u16 snb_gmch_ctl; + int ret; + + *mappable_base = pci_resource_start(dev->pdev, 2); + *mappable_end = pci_resource_len(dev->pdev, 2); + + /* 64/512MB is the current min/max we actually know of, but this is just + * a coarse sanity check. 
+ */ + if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { + DRM_ERROR("Unknown GMADR size (%lx)\n", + dev_priv->gtt.mappable_end); + return -ENXIO; + } + + if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) + pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); + pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); + + *stolen = gen6_get_stolen_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; + + /* For Modern GENs the PTEs and register space are split in the BAR */ + gtt_bus_addr = pci_resource_start(dev->pdev, 0) + + (pci_resource_len(dev->pdev, 0) / 2); + + dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); + if (!dev_priv->gtt.gsm) { + DRM_ERROR("Failed to map the gtt page table\n"); + return -ENOMEM; + } + + ret = setup_scratch_page(dev); + if (ret) + DRM_ERROR("Scratch setup failed\n"); + + dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; + dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; + + return ret; +} + +static void gen6_gmch_remove(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + iounmap(dev_priv->gtt.gsm); + teardown_scratch_page(dev_priv->dev); +} + +static int i915_gmch_probe(struct drm_device *dev, + size_t *gtt_total, + size_t *stolen, + phys_addr_t *mappable_base, + unsigned long *mappable_end) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + return -EIO; + } + + intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); + + dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); + dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; + dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; + + return 0; +} + +static void i915_gmch_remove(struct drm_device *dev) +{ + intel_gmch_remove(); +} + +int i915_gem_gtt_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gtt *gtt = &dev_priv->gtt; + int ret; + + if (INTEL_INFO(dev)->gen <= 5) { + dev_priv->gtt.gtt_probe = i915_gmch_probe; + dev_priv->gtt.gtt_remove = i915_gmch_remove; + } else { + dev_priv->gtt.gtt_probe = gen6_gmch_probe; + dev_priv->gtt.gtt_remove = gen6_gmch_remove; + } + + ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, + &dev_priv->gtt.stolen_size, + &gtt->mappable_base, + &gtt->mappable_end); + if (ret) + return ret; + + /* GMADR is the PCI mmio aperture into the global GTT.
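To make the probe arithmetic above concrete: the GGMS field of SNB_GMCH_CTRL gives the GTT table size in 1 MiB units, the GMS field gives stolen memory in 32 MiB units, and each 4-byte PTE maps one 4 KiB page. The field values below are made-up examples, not values read from real hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ggms = 2;                    /* example GTT-size field value  */
        uint64_t gms  = 5;                    /* example stolen-memory field   */

        uint64_t gtt_bytes    = ggms << 20;   /* 1 MiB units                   */
        uint64_t stolen_bytes = gms  << 25;   /* 32 MiB units                  */

        /* Same computation as *gtt_total above: entry count times page size. */
        uint64_t mappable = (gtt_bytes / 4) << 12;

        printf("GTT table %llu MiB, stolen %llu MiB, mappable space %llu GiB\n",
               (unsigned long long)(gtt_bytes >> 20),
               (unsigned long long)(stolen_bytes >> 20),
               (unsigned long long)(mappable >> 30));
        return 0;
}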
*/ + DRM_INFO("Memory usable by graphics device = %zdM\n", + dev_priv->gtt.total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %ldM\n", + dev_priv->gtt.mappable_end >> 20); + DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", + dev_priv->gtt.stolen_size >> 20); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c new file mode 100644 index 0000000..130d1db --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -0,0 +1,387 @@ +/* + * Copyright © 2008-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Chris Wilson + * + */ + +#include +#include +#include "i915_drv.h" + +/* + * The BIOS typically reserves some of the system's memory for the exclusive + * use of the integrated graphics. This memory is no longer available for + * use by the OS and so the user finds that his system has less memory + * available than he put in. We refer to this memory as stolen. + * + * The BIOS will allocate its framebuffer from the stolen memory. Our + * goal is try to reuse that object for our own fbcon which must always + * be available for panics. Anything else we can reuse the stolen memory + * for is a boon. + */ + +static unsigned long i915_stolen_to_physical(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->bridge_dev; + u32 base; + + /* On the machines I have tested the Graphics Base of Stolen Memory + * is unreliable, so on those compute the base by subtracting the + * stolen memory from the Top of Low Usable DRAM which is where the + * BIOS places the graphics stolen memory. + * + * On gen2, the layout is slightly different with the Graphics Segment + * immediately following Top of Memory (or Top of Usable DRAM). Note + * it appears that TOUD is only reported by 865g, so we just use the + * top of memory as determined by the e820 probe. + * + * XXX gen2 requires an unavailable symbol and 945gm fails with + * its value of TOLUD. + */ + base = 0; + if (INTEL_INFO(dev)->gen >= 6) { + /* Read Base Data of Stolen Memory Register (BDSM) directly. + * Note that there is also a MCHBAR miror at 0x1080c0 or + * we could use device 2:0x5c instead. 
+ */ + pci_read_config_dword(pdev, 0xB0, &base); + base &= ~4095; /* lower bits used for locking register */ + } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + /* Read Graphics Base of Stolen Memory directly */ + pci_read_config_dword(pdev, 0xA4, &base); +#if 0 + } else if (IS_GEN3(dev)) { + u8 val; + /* Stolen is immediately below Top of Low Usable DRAM */ + pci_read_config_byte(pdev, 0x9c, &val); + base = val >> 3 << 27; + base -= dev_priv->mm.gtt->stolen_size; + } else { + /* Stolen is immediately above Top of Memory */ + base = max_low_pfn_mapped << PAGE_SHIFT; +#endif + } + + return base; +} + +static int i915_setup_compression(struct drm_device *dev, int size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); + + /* Try to over-allocate to reduce reallocations and fragmentation */ + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, + size <<= 1, 4096, 0); + if (!compressed_fb) + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, + size >>= 1, 4096, 0); + if (compressed_fb) + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + if (!compressed_fb) + goto err; + + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + else if (IS_GM45(dev)) { + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + } else { + compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, + 4096, 4096, 0); + if (compressed_llb) + compressed_llb = drm_mm_get_block(compressed_llb, + 4096, 4096); + if (!compressed_llb) + goto err_fb; + + dev_priv->compressed_llb = compressed_llb; + + I915_WRITE(FBC_CFB_BASE, + dev_priv->mm.stolen_base + compressed_fb->start); + I915_WRITE(FBC_LL_BASE, + dev_priv->mm.stolen_base + compressed_llb->start); + } + + dev_priv->compressed_fb = compressed_fb; + dev_priv->cfb_size = size; + + DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", + size); + + return 0; + +err_fb: + drm_mm_put_block(compressed_fb); +err: + return -ENOSPC; +} + +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->mm.stolen_base == 0) + return -ENODEV; + + if (size < dev_priv->cfb_size) + return 0; + + /* Release any current block */ + i915_gem_stolen_cleanup_compression(dev); + + return i915_setup_compression(dev, size); +} + +void i915_gem_stolen_cleanup_compression(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->cfb_size == 0) + return; + + if (dev_priv->compressed_fb) + drm_mm_put_block(dev_priv->compressed_fb); + + if (dev_priv->compressed_llb) + drm_mm_put_block(dev_priv->compressed_llb); + + dev_priv->cfb_size = 0; +} + +void i915_gem_cleanup_stolen(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + i915_gem_stolen_cleanup_compression(dev); + drm_mm_takedown(&dev_priv->mm.stolen); +} + +int i915_gem_init_stolen(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); + if (dev_priv->mm.stolen_base == 0) + return 0; + + DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", + dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); + + /* Basic memrange allocator for stolen space */ + drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size); + + return 0; +} + +static struct sg_table * +i915_pages_create_for_stolen(struct drm_device *dev, + u32 offset, u32 size) +{ + struct 
drm_i915_private *dev_priv = dev->dev_private; + struct sg_table *st; + struct scatterlist *sg; + + DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); + BUG_ON(offset > dev_priv->gtt.stolen_size - size); + + /* We hide that we have no struct page backing our stolen object + * by wrapping the contiguous physical allocation with a fake + * dma mapping in a single scatterlist. + */ + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return NULL; + + if (sg_alloc_table(st, 1, GFP_KERNEL)) { + kfree(st); + return NULL; + } + + sg = st->sgl; + sg->offset = offset; + sg->length = size; + + sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; + sg_dma_len(sg) = size; + + return st; +} + +static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) +{ + BUG(); + return -EINVAL; +} + +static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) +{ + /* Should only be called during free */ + sg_free_table(obj->pages); + kfree(obj->pages); +} + +static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { + .get_pages = i915_gem_object_get_pages_stolen, + .put_pages = i915_gem_object_put_pages_stolen, +}; + +static struct drm_i915_gem_object * +_i915_gem_object_create_stolen(struct drm_device *dev, + struct drm_mm_node *stolen) +{ + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_alloc(dev); + if (obj == NULL) + return NULL; + + if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) + goto cleanup; + + i915_gem_object_init(obj, &i915_gem_object_stolen_ops); + + obj->pages = i915_pages_create_for_stolen(dev, + stolen->start, stolen->size); + if (obj->pages == NULL) + goto cleanup; + + obj->has_dma_mapping = true; + obj->pages_pin_count = 1; + obj->stolen = stolen; + + obj->base.write_domain = I915_GEM_DOMAIN_GTT; + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->cache_level = I915_CACHE_NONE; + + return obj; + +cleanup: + i915_gem_object_free(obj); + return NULL; +} + +struct drm_i915_gem_object * +i915_gem_object_create_stolen(struct drm_device *dev, u32 size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct drm_mm_node *stolen; + + if (dev_priv->mm.stolen_base == 0) + return NULL; + + DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); + if (size == 0) + return NULL; + + stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); + if (stolen) + stolen = drm_mm_get_block(stolen, size, 4096); + if (stolen == NULL) + return NULL; + + obj = _i915_gem_object_create_stolen(dev, stolen); + if (obj) + return obj; + + drm_mm_put_block(stolen); + return NULL; +} + +struct drm_i915_gem_object * +i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, + u32 stolen_offset, + u32 gtt_offset, + u32 size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct drm_mm_node *stolen; + + if (dev_priv->mm.stolen_base == 0) + return NULL; + + DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", + stolen_offset, gtt_offset, size); + + /* KISS and expect everything to be page-aligned */ + BUG_ON(stolen_offset & 4095); + BUG_ON(gtt_offset & 4095); + BUG_ON(size & 4095); + + if (WARN_ON(size == 0)) + return NULL; + + stolen = drm_mm_create_block(&dev_priv->mm.stolen, + stolen_offset, size, + false); + if (stolen == NULL) { + DRM_DEBUG_KMS("failed to allocate stolen space\n"); + return NULL; + } + + obj = _i915_gem_object_create_stolen(dev, stolen); + 
if (obj == NULL) { + DRM_DEBUG_KMS("failed to allocate stolen object\n"); + drm_mm_put_block(stolen); + return NULL; + } + + /* To simplify the initialisation sequence between KMS and GTT, + * we allow construction of the stolen object prior to + * setting up the GTT space. The actual reservation will occur + * later. + */ + if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { + obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, + gtt_offset, size, + false); + if (obj->gtt_space == NULL) { + DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); + drm_gem_object_unreference(&obj->base); + return NULL; + } + } else + obj->gtt_space = I915_GTT_RESERVED; + + obj->gtt_offset = gtt_offset; + obj->has_global_gtt_mapping = 1; + + list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + return obj; +} + +void +i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) +{ + if (obj->stolen) { + drm_mm_put_block(obj->stolen); + obj->stolen = NULL; + } +} diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c new file mode 100644 index 0000000..537545b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -0,0 +1,523 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include +#include +#include +#include +#include "i915_drv.h" + +/** @file i915_gem_tiling.c + * + * Support for managing tiling state of buffer objects. + * + * The idea behind tiling is to increase cache hit rates by rearranging + * pixel data so that a group of pixel accesses are in the same cacheline. + * Performance improvement from doing this on the back/depth buffer are on + * the order of 30%. + * + * Intel architectures make this somewhat more complicated, though, by + * adjustments made to addressing of data when the memory is in interleaved + * mode (matched pairs of DIMMS) to improve memory bandwidth. + * For interleaved memory, the CPU sends every sequential 64 bytes + * to an alternate memory channel so it can get the bandwidth from both. + * + * The GPU also rearranges its accesses for increased bandwidth to interleaved + * memory, and it matches what the CPU does for non-tiled. However, when tiled + * it does it a little differently, since one walks addresses not just in the + * X direction but also Y. 
So, along with alternating channels when bit + * 6 of the address flips, it also alternates when other bits flip -- Bits 9 + * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) + * are common to both the 915 and 965-class hardware. + * + * The CPU also sometimes XORs in higher bits as well, to improve + * bandwidth doing strided access like we do so frequently in graphics. This + * is called "Channel XOR Randomization" in the MCH documentation. The result + * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address + * decode. + * + * All of this bit 6 XORing has an effect on our memory management, + * as we need to make sure that the 3d driver can correctly address object + * contents. + * + * If we don't have interleaved memory, all tiling is safe and no swizzling is + * required. + * + * When bit 17 is XORed in, we simply refuse to tile at all. Bit + * 17 is not just a page offset, so as we page an objet out and back in, + * individual pages in it will have different bit 17 addresses, resulting in + * each 64 bytes being swapped with its neighbor! + * + * Otherwise, if interleaved, we have to tell the 3d driver what the address + * swizzling it needs to do is, since it's writing with the CPU to the pages + * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the + * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling + * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order + * to match what the GPU expects. + */ + +/** + * Detects bit 6 swizzling of address lookup between IGD access and CPU + * access through main memory. + */ +void +i915_gem_detect_bit_6_swizzle(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + + if (IS_VALLEYVIEW(dev)) { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } else if (INTEL_INFO(dev)->gen >= 6) { + uint32_t dimm_c0, dimm_c1; + dimm_c0 = I915_READ(MAD_DIMM_C0); + dimm_c1 = I915_READ(MAD_DIMM_C1); + dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; + dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; + /* Enable swizzling when the channels are populated with + * identically sized dimms. We don't need to check the 3rd + * channel because no cpu with gpu attached ships in that + * configuration. Also, swizzling only makes sense for 2 + * channels anyway. */ + if (dimm_c0 == dimm_c1) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } + } else if (IS_GEN5(dev)) { + /* On Ironlake whatever DRAM config, GPU always do + * same swizzling setup. + */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else if (IS_GEN2(dev)) { + /* As far as we know, the 865 doesn't have these bit 6 + * swizzling issues. + */ + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { + uint32_t dcc; + + /* On 9xx chipsets, channel interleave by the CPU is + * determined by DCC. For single-channel, neither the CPU + * nor the GPU do swizzling. For dual channel interleaved, + * the GPU's interleave is bit 9 and 10 for X tiled, and bit + * 9 for Y tiled. The CPU's interleave is independent, and + * can be based on either bit 11 (haven't seen this yet) or + * bit 17 (common). 
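For the I915_BIT_6_SWIZZLE_9_10 case selected above (gen5, and gen6+ with identically populated channels), the transform the comment describes is just bit 6 XORed with bits 9 and 10 of the address. A tiny stand-alone illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Bit 6 of the address becomes bit6 ^ bit9 ^ bit10; all other bits pass
 * through unchanged. */
static uint64_t swizzle_bit_6_9_10(uint64_t addr)
{
        uint64_t bit6  = (addr >> 6) & 1;
        uint64_t bit9  = (addr >> 9) & 1;
        uint64_t bit10 = (addr >> 10) & 1;

        return (addr & ~(1ull << 6)) | ((bit6 ^ bit9 ^ bit10) << 6);
}

int main(void)
{
        /* 0x240 has bits 6 and 9 set, so bit 6 cancels out: result is 0x200. */
        printf("0x240 -> 0x%llx\n", (unsigned long long)swizzle_bit_6_9_10(0x240));
        return 0;
}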
+ */ + dcc = I915_READ(DCC); + switch (dcc & DCC_ADDRESSING_MODE_MASK) { + case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + break; + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: + if (dcc & DCC_CHANNEL_XOR_DISABLE) { + /* This is the base swizzling by the GPU for + * tiled buffers. + */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { + /* Bit 11 swizzling by the CPU in addition. */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; + swizzle_y = I915_BIT_6_SWIZZLE_9_11; + } else { + /* Bit 17 swizzling by the CPU in addition. */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; + swizzle_y = I915_BIT_6_SWIZZLE_9_17; + } + break; + } + if (dcc == 0xffffffff) { + DRM_ERROR("Couldn't read from MCHBAR. " + "Disabling tiling.\n"); + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } + } else { + /* The 965, G33, and newer, have a very flexible memory + * configuration. It will enable dual-channel mode + * (interleaving) on as much memory as it can, and the GPU + * will additionally sometimes enable different bit 6 + * swizzling for tiled objects from the CPU. + * + * Here's what I found on the G965: + * slot fill memory size swizzling + * 0A 0B 1A 1B 1-ch 2-ch + * 512 0 0 0 512 0 O + * 512 0 512 0 16 1008 X + * 512 0 0 512 16 1008 X + * 0 512 0 512 16 1008 X + * 1024 1024 1024 0 2048 1024 O + * + * We could probably detect this based on either the DRB + * matching, which was the case for the swizzling required in + * the table above, or from the 1-ch value being less than + * the minimum size of a rank. + */ + if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } else { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } + } + + dev_priv->mm.bit_6_swizzle_x = swizzle_x; + dev_priv->mm.bit_6_swizzle_y = swizzle_y; +} + +/* Check pitch constriants for all chips & tiling formats */ +static bool +i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) +{ + int tile_width; + + /* Linear is always fine */ + if (tiling_mode == I915_TILING_NONE) + return true; + + if (IS_GEN2(dev) || + (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) + tile_width = 128; + else + tile_width = 512; + + /* check maximum stride & object size */ + /* i965+ stores the end address of the gtt mapping in the fence + * reg, so dont bother to check the size */ + if (INTEL_INFO(dev)->gen >= 7) { + if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) + return false; + } else if (INTEL_INFO(dev)->gen >= 4) { + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) + return false; + } else { + if (stride > 8192) + return false; + + if (IS_GEN3(dev)) { + if (size > I830_FENCE_MAX_SIZE_VAL << 20) + return false; + } else { + if (size > I830_FENCE_MAX_SIZE_VAL << 19) + return false; + } + } + + if (stride < tile_width) + return false; + + /* 965+ just needs multiples of tile width */ + if (INTEL_INFO(dev)->gen >= 4) { + if (stride & (tile_width - 1)) + return false; + return true; + } + + /* Pre-965 needs power of two tile widths */ + if (stride & (stride - 1)) + return false; + + return true; +} + +/* Is the current GTT allocation valid for the change in tiling? 
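Leaving aside the per-generation maximum pitch and size caps also checked above, the stride rules in i915_tiling_ok() condense to the sketch below; the function and parameter names are mine, not the driver's.

#include <stdbool.h>

static bool stride_ok(int gen, int stride, int tile_width)
{
        if (stride < tile_width)
                return false;

        if (gen >= 4)                           /* 965 and later: any multiple of the tile width */
                return (stride % tile_width) == 0;

        return (stride & (stride - 1)) == 0;    /* pre-965: stride must be a power of two */
}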
*/ +static bool +i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) +{ + u32 size; + + if (tiling_mode == I915_TILING_NONE) + return true; + + if (INTEL_INFO(obj->base.dev)->gen >= 4) + return true; + + if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (obj->gtt_offset & ~I915_FENCE_START_MASK) + return false; + } else { + if (obj->gtt_offset & ~I830_FENCE_START_MASK) + return false; + } + + size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); + if (obj->gtt_space->size != size) + return false; + + if (obj->gtt_offset & (size - 1)) + return false; + + return true; +} + +/** + * Sets the tiling mode of an object, returning the required swizzling of + * bit 6 of addresses in the object. + */ +int +i915_gem_set_tiling(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_set_tiling *args = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + int ret = 0; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) + return -ENOENT; + + if (!i915_tiling_ok(dev, + args->stride, obj->base.size, args->tiling_mode)) { + drm_gem_object_unreference_unlocked(&obj->base); + return -EINVAL; + } + + if (obj->pin_count) { + drm_gem_object_unreference_unlocked(&obj->base); + return -EBUSY; + } + + if (args->tiling_mode == I915_TILING_NONE) { + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; + } else { + if (args->tiling_mode == I915_TILING_X) + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + else + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + + /* Hide bit 17 swizzling from the user. This prevents old Mesa + * from aborting the application on sw fallbacks to bit 17, + * and we use the pread/pwrite bit17 paths to swizzle for it. + * If there was a user that was relying on the swizzle + * information for drm_intel_bo_map()ed reads/writes this would + * break it, but we don't have any of those. + */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + + /* If we can't handle the swizzling, make it untiled. */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { + args->tiling_mode = I915_TILING_NONE; + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; + } + } + + mutex_lock(&dev->struct_mutex); + if (args->tiling_mode != obj->tiling_mode || + args->stride != obj->stride) { + /* We need to rebind the object if its current allocation + * no longer meets the alignment restrictions for its new + * tiling mode. Otherwise we can just leave it alone, but + * need to ensure that any fence register is updated before + * the next fenced (either through the GTT or by the BLT unit + * on older GPUs) access. + * + * After updating the tiling parameters, we then flag whether + * we need to update an associated fence register. Note this + * has to also include the unfenced register the GPU uses + * whilst executing a fenced command for an untiled object. 
+ */ + + obj->map_and_fenceable = + obj->gtt_space == NULL || + (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && + i915_gem_object_fence_ok(obj, args->tiling_mode)); + + /* Rebind if we need a change of alignment */ + if (!obj->map_and_fenceable) { + u32 unfenced_alignment = + i915_gem_get_gtt_alignment(dev, obj->base.size, + args->tiling_mode, + false); + if (obj->gtt_offset & (unfenced_alignment - 1)) + ret = i915_gem_object_unbind(obj); + } + + if (ret == 0) { + obj->fence_dirty = + obj->fenced_gpu_access || + obj->fence_reg != I915_FENCE_REG_NONE; + + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; + + /* Force the fence to be reacquired for GTT access */ + i915_gem_release_mmap(obj); + } + } + /* we have to maintain this existing ABI... */ + args->stride = obj->stride; + args->tiling_mode = obj->tiling_mode; + + /* Try to preallocate memory required to save swizzling on put-pages */ + if (i915_gem_object_needs_bit17_swizzle(obj)) { + if (obj->bit_17 == NULL) { + obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * + sizeof(long), GFP_KERNEL); + } + } else { + kfree(obj->bit_17); + obj->bit_17 = NULL; + } + + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * Returns the current tiling mode and required bit 6 swizzling for the object. + */ +int +i915_gem_get_tiling(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_get_tiling *args = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) + return -ENOENT; + + mutex_lock(&dev->struct_mutex); + + args->tiling_mode = obj->tiling_mode; + switch (obj->tiling_mode) { + case I915_TILING_X: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + break; + case I915_TILING_Y: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + break; + case I915_TILING_NONE: + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + break; + default: + DRM_ERROR("unknown tiling mode\n"); + } + + /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +/** + * Swap every 64 bytes of this page around, to account for it having a new + * bit 17 of its physical address and therefore being interpreted differently + * by the GPU. 
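From the other side of the ABI, a userspace caller of the set-tiling ioctl looks roughly like the sketch below. It assumes libdrm's drmIoctl() wrapper and a GEM handle created elsewhere; note that the kernel writes the accepted values back, possibly downgraded (see the UNKNOWN-swizzle fallback above), so the struct should be re-read after the call.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>            /* libdrm: drmIoctl() */
#include <drm/i915_drm.h>

/* Ask for X tiling on an existing GEM object; 'fd' is an open DRM fd and
 * 'handle' a GEM handle obtained elsewhere. */
int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
        struct drm_i915_gem_set_tiling arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.tiling_mode = I915_TILING_X;
        arg.stride = stride;

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
                return -1;

        /* The kernel may have adjusted the request; report what stuck. */
        printf("tiling=%u stride=%u swizzle=%u\n",
               arg.tiling_mode, arg.stride, arg.swizzle_mode);
        return 0;
}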
+ */ +static void +i915_gem_swizzle_page(struct page *page) +{ + char temp[64]; + char *vaddr; + int i; + + vaddr = kmap(page); + + for (i = 0; i < PAGE_SIZE; i += 128) { + memcpy(temp, &vaddr[i], 64); + memcpy(&vaddr[i], &vaddr[i + 64], 64); + memcpy(&vaddr[i + 64], temp, 64); + } + + kunmap(page); +} + +void +i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) +{ + struct sg_page_iter sg_iter; + int i; + + if (obj->bit_17 == NULL) + return; + + i = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { + struct page *page = sg_page_iter_page(&sg_iter); + char new_bit_17 = page_to_phys(page) >> 17; + if ((new_bit_17 & 0x1) != + (test_bit(i, obj->bit_17) != 0)) { + i915_gem_swizzle_page(page); + set_page_dirty(page); + } + i++; + } +} + +void +i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) +{ + struct sg_page_iter sg_iter; + int page_count = obj->base.size >> PAGE_SHIFT; + int i; + + if (obj->bit_17 == NULL) { + obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * + sizeof(long), GFP_KERNEL); + if (obj->bit_17 == NULL) { + DRM_ERROR("Failed to allocate memory for bit 17 " + "record\n"); + return; + } + } + + i = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { + if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) + __set_bit(i, obj->bit_17); + else + __clear_bit(i, obj->bit_17); + i++; + } +} diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c new file mode 100644 index 0000000..3c59584 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -0,0 +1,221 @@ +/** + * \file i915_ioc32.c + * + * 32-bit ioctl compatibility routines for the i915 DRM. + * + * \author Alan Hourihane + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Alan Hourihane 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#include + +#include +#include +#include "i915_drv.h" + +typedef struct _drm_i915_batchbuffer32 { + int start; /* agp offset */ + int used; /* nr bytes in use */ + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ + int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ + int num_cliprects; /* mulitpass with multiple cliprects? 
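The 64-byte exchange performed by i915_gem_swizzle_page() above is its own inverse, which is what lets the same helper both apply and undo the bit-17 correction. A minimal userspace demonstration on a plain malloc'd buffer, with PAGE_SIZE hard-coded to 4096 for the example:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* The same 64-byte exchange as i915_gem_swizzle_page() above, applied to
 * an ordinary buffer. */
static void swizzle_page(unsigned char *vaddr)
{
        unsigned char temp[64];
        int i;

        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, &vaddr[i], 64);
                memcpy(&vaddr[i], &vaddr[i + 64], 64);
                memcpy(&vaddr[i + 64], temp, 64);
        }
}

int main(void)
{
        unsigned char *a = malloc(PAGE_SIZE), *b = malloc(PAGE_SIZE);
        int i;

        if (!a || !b)
                return 1;
        for (i = 0; i < PAGE_SIZE; i++)
                a[i] = (unsigned char)i;
        memcpy(b, a, PAGE_SIZE);

        /* Applying the swizzle twice restores the original contents. */
        swizzle_page(b);
        swizzle_page(b);
        assert(memcmp(a, b, PAGE_SIZE) == 0);

        free(a);
        free(b);
        return 0;
}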
*/ + u32 cliprects; /* pointer to userspace cliprects */ +} drm_i915_batchbuffer32_t; + +static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_batchbuffer32_t batchbuffer32; + drm_i915_batchbuffer_t __user *batchbuffer; + + if (copy_from_user + (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32))) + return -EFAULT; + + batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer)); + if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer)) + || __put_user(batchbuffer32.start, &batchbuffer->start) + || __put_user(batchbuffer32.used, &batchbuffer->used) + || __put_user(batchbuffer32.DR1, &batchbuffer->DR1) + || __put_user(batchbuffer32.DR4, &batchbuffer->DR4) + || __put_user(batchbuffer32.num_cliprects, + &batchbuffer->num_cliprects) + || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects, + &batchbuffer->cliprects)) + return -EFAULT; + + return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER, + (unsigned long)batchbuffer); +} + +typedef struct _drm_i915_cmdbuffer32 { + u32 buf; /* pointer to userspace command buffer */ + int sz; /* nr bytes in buf */ + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ + int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ + int num_cliprects; /* mulitpass with multiple cliprects? */ + u32 cliprects; /* pointer to userspace cliprects */ +} drm_i915_cmdbuffer32_t; + +static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_cmdbuffer32_t cmdbuffer32; + drm_i915_cmdbuffer_t __user *cmdbuffer; + + if (copy_from_user + (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32))) + return -EFAULT; + + cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer)); + if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer)) + || __put_user((int __user *)(unsigned long)cmdbuffer32.buf, + &cmdbuffer->buf) + || __put_user(cmdbuffer32.sz, &cmdbuffer->sz) + || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1) + || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4) + || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects) + || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects, + &cmdbuffer->cliprects)) + return -EFAULT; + + return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER, + (unsigned long)cmdbuffer); +} + +typedef struct drm_i915_irq_emit32 { + u32 irq_seq; +} drm_i915_irq_emit32_t; + +static int compat_i915_irq_emit(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_irq_emit32_t req32; + drm_i915_irq_emit_t __user *request; + + if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user((int __user *)(unsigned long)req32.irq_seq, + &request->irq_seq)) + return -EFAULT; + + return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT, + (unsigned long)request); +} +typedef struct drm_i915_getparam32 { + int param; + u32 value; +} drm_i915_getparam32_t; + +static int compat_i915_getparam(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_getparam32_t req32; + drm_i915_getparam_t __user *request; + + if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.param, &request->param) + || __put_user((void __user *)(unsigned long)req32.value, + &request->value)) + return -EFAULT; + + return drm_ioctl(file, 
DRM_IOCTL_I915_GETPARAM, + (unsigned long)request); +} + +typedef struct drm_i915_mem_alloc32 { + int region; + int alignment; + int size; + u32 region_offset; /* offset from start of fb or agp */ +} drm_i915_mem_alloc32_t; + +static int compat_i915_alloc(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_mem_alloc32_t req32; + drm_i915_mem_alloc_t __user *request; + + if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.region, &request->region) + || __put_user(req32.alignment, &request->alignment) + || __put_user(req32.size, &request->size) + || __put_user((void __user *)(unsigned long)req32.region_offset, + &request->region_offset)) + return -EFAULT; + + return drm_ioctl(file, DRM_IOCTL_I915_ALLOC, + (unsigned long)request); +} + +static drm_ioctl_compat_t *i915_compat_ioctls[] = { + [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, + [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, + [DRM_I915_GETPARAM] = compat_i915_getparam, + [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, + [DRM_I915_ALLOC] = compat_i915_alloc +}; + +#ifdef CONFIG_COMPAT +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) + fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; + + if (fn != NULL) + ret = (*fn) (filp, cmd, arg); + else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +} +#endif diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c new file mode 100644 index 0000000..c8d16a6 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -0,0 +1,3212 @@ +/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- + */ +/* + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
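The compat thunks above exist because these legacy structs embed user pointers, so their layout differs between a 32-bit caller and the 64-bit kernel. A small standalone comparison of the two layouts, with field types simplified for the example:

#include <stdint.h>
#include <stdio.h>

/* Layout a 32-bit process hands in: the cliprects pointer is carried as a
 * 32-bit integer (mirrors drm_i915_batchbuffer32_t above). */
struct batchbuffer32 {
        int32_t start, used, DR1, DR4, num_cliprects;
        uint32_t cliprects;
};

/* Layout the 64-bit kernel expects: a real pointer, so the struct is
 * larger and the pointer field sits at a different offset. */
struct batchbuffer64 {
        int32_t start, used, DR1, DR4, num_cliprects;
        void *cliprects;
};

int main(void)
{
        printf("32-bit ABI: %zu bytes, 64-bit ABI: %zu bytes\n",
               sizeof(struct batchbuffer32), sizeof(struct batchbuffer64));
        return 0;
}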
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +static const u32 hpd_ibx[] = { + [HPD_CRT] = SDE_CRT_HOTPLUG, + [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, + [HPD_PORT_B] = SDE_PORTB_HOTPLUG, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG +}; + +static const u32 hpd_cpt[] = { + [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, + [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, + [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, + [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, + [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT +}; + +static const u32 hpd_mask_i915[] = { + [HPD_CRT] = CRT_HOTPLUG_INT_EN, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN +}; + +static const u32 hpd_status_gen4[] = { + [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS +}; + +static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ + [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, + [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, + [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, + [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, + [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS +}; + +static void ibx_hpd_irq_setup(struct drm_device *dev); +static void i915_hpd_irq_setup(struct drm_device *dev); + +/* For display hotplug interrupt */ +static void +ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); + POSTING_READ(DEIMR); + } +} + +static void +ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); + POSTING_READ(DEIMR); + } +} + +void +i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +{ + u32 reg = PIPESTAT(pipe); + u32 pipestat = I915_READ(reg) & 0x7fff0000; + + if ((pipestat & mask) == mask) + return; + + /* Enable the interrupt, clear any pending status */ + pipestat |= mask | (mask >> 16); + I915_WRITE(reg, pipestat); + POSTING_READ(reg); +} + +void +i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) +{ + u32 reg = PIPESTAT(pipe); + u32 pipestat = I915_READ(reg) & 0x7fff0000; + + if ((pipestat & mask) == 0) + return; + + pipestat &= ~mask; + I915_WRITE(reg, pipestat); + POSTING_READ(reg); +} + +/** + * intel_enable_asle - enable ASLE interrupt for OpRegion + */ +void intel_enable_asle(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + /* FIXME: opregion/asle for VLV */ + if (IS_VALLEYVIEW(dev)) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + if (HAS_PCH_SPLIT(dev)) + ironlake_enable_display_irq(dev_priv, DE_GSE); + else { + i915_enable_pipestat(dev_priv, 1, + PIPE_LEGACY_BLC_EVENT_ENABLE); + if (INTEL_INFO(dev)->gen >= 4) + i915_enable_pipestat(dev_priv, 0, + PIPE_LEGACY_BLC_EVENT_ENABLE); + } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +/** + * i915_pipe_enabled - check if a pipe is enabled + * @dev: DRM device + * @pipe: pipe to check + * + * Reading 
certain registers when the pipe is disabled can hang the chip. + * Use this routine to make sure the PLL is running and the pipe is active + * before reading such registers if unsure. + */ +static int +i915_pipe_enabled(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + + return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; +} + +/* Called from drm generic code, passed a 'crtc', which + * we use as a pipe index + */ +static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long high_frame; + unsigned long low_frame; + u32 high1, high2, low; + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get vblank count for disabled " + "pipe %c\n", pipe_name(pipe)); + return 0; + } + + high_frame = PIPEFRAME(pipe); + low_frame = PIPEFRAMEPIXEL(pipe); + + /* + * High & low register fields aren't synchronized, so make sure + * we get a low value that's stable across two reads of the high + * register. + */ + do { + high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; + low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; + high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; + } while (high1 != high2); + + high1 >>= PIPE_FRAME_HIGH_SHIFT; + low >>= PIPE_FRAME_LOW_SHIFT; + return (high1 << 8) | low; +} + +static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int reg = PIPE_FRMCOUNT_GM45(pipe); + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get vblank count for disabled " + "pipe %c\n", pipe_name(pipe)); + return 0; + } + + return I915_READ(reg); +} + +static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 vbl = 0, position = 0; + int vbl_start, vbl_end, htotal, vtotal; + bool in_vbl = true; + int ret = 0; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " + "pipe %c\n", pipe_name(pipe)); + return 0; + } + + /* Get vtotal. */ + vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + + if (INTEL_INFO(dev)->gen >= 4) { + /* No obvious pixelcount register. Only query vertical + * scanout position from Display scan line register. + */ + position = I915_READ(PIPEDSL(pipe)); + + /* Decode into vertical scanout position. Don't have + * horizontal scanout position. + */ + *vpos = position & 0x1fff; + *hpos = 0; + } else { + /* Have access to pixelcount since start of frame. + * We can split this into vertical and horizontal + * scanout position. + */ + position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + + htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } + + /* Query vblank area. */ + vbl = I915_READ(VBLANK(cpu_transcoder)); + + /* Test position against vblank region. */ + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + + if ((*vpos < vbl_start) || (*vpos > vbl_end)) + in_vbl = false; + + /* Inside "upper part" of vblank area? 
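The frame-counter read in i915_get_vblank_counter() above is the usual technique for sampling a split counter whose halves are not latched together. A generic, self-contained version of the same loop, with a simulated counter standing in for the two hardware registers:

#include <stdint.h>
#include <stdio.h>

/* Simulated free-running frame counter; it advances on every register
 * read, so the two halves are never sampled at the same instant. */
static uint32_t hw_frames;

static uint32_t read_high(void) { hw_frames += 3; return hw_frames >> 8; }
static uint32_t read_low(void)  { hw_frames += 3; return hw_frames & 0xff; }

/* Same "read high, read low, re-read high" loop as above: retry until the
 * high half is unchanged across the low read. */
static uint32_t read_split_counter(void)
{
        uint32_t high1, high2, low;

        do {
                high1 = read_high();
                low = read_low();
                high2 = read_high();
        } while (high1 != high2);

        return (high1 << 8) | low;
}

int main(void)
{
        printf("frame counter: %u\n", read_split_counter());
        return 0;
}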
Apply corrective offset: */ + if (in_vbl && (*vpos >= vbl_start)) + *vpos = *vpos - vtotal; + + /* Readouts valid? */ + if (vbl > 0) + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_INVBL; + + return ret; +} + +static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *crtc; + + if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { + DRM_ERROR("Invalid crtc %d\n", pipe); + return -EINVAL; + } + + /* Get drm_crtc to timestamp: */ + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc == NULL) { + DRM_ERROR("Invalid crtc %d\n", pipe); + return -EINVAL; + } + + if (!crtc->enabled) { + DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); + return -EBUSY; + } + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, + vblank_time, flags, + crtc); +} + +/* + * Handle hotplug events outside the interrupt handler proper. + */ +#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) + +static void i915_hotplug_work_func(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + hotplug_work); + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_connector *intel_connector; + struct intel_encoder *intel_encoder; + struct drm_connector *connector; + unsigned long irqflags; + bool hpd_disabled = false; + + /* HPD irq before everything is fully set up. */ + if (!dev_priv->enable_hotplug_processing) + return; + + mutex_lock(&mode_config->mutex); + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + list_for_each_entry(connector, &mode_config->connector_list, head) { + intel_connector = to_intel_connector(connector); + intel_encoder = intel_connector->encoder; + if (intel_encoder->hpd_pin > HPD_NONE && + dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && + connector->polled == DRM_CONNECTOR_POLL_HPD) { + DRM_INFO("HPD interrupt storm detected on connector %s: " + "switching from hotplug detection to polling\n", + drm_get_connector_name(connector)); + dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; + connector->polled = DRM_CONNECTOR_POLL_CONNECT + | DRM_CONNECTOR_POLL_DISCONNECT; + hpd_disabled = true; + } + } + /* if there were no outputs to poll, poll was disabled, + * therefore make sure it's enabled when disabling HPD on + * some connectors */ + if (hpd_disabled) { + drm_kms_helper_poll_enable(dev); + mod_timer(&dev_priv->hotplug_reenable_timer, + jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); + } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + if (intel_encoder->hot_plug) + intel_encoder->hot_plug(intel_encoder); + + mutex_unlock(&mode_config->mutex); + + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(dev); +} + +static void ironlake_handle_rps_change(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 busy_up, busy_down, max_avg, min_avg; + u8 new_delay; + unsigned long flags; + + spin_lock_irqsave(&mchdev_lock, flags); + + I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); + + new_delay = dev_priv->ips.cur_delay; + + I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = I915_READ(RCPREVBSYTUPAVG); 
+ busy_down = I915_READ(RCPREVBSYTDNAVG); + max_avg = I915_READ(RCBMAXAVG); + min_avg = I915_READ(RCBMINAVG); + + /* Handle RCS change request from hw */ + if (busy_up > max_avg) { + if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) + new_delay = dev_priv->ips.cur_delay - 1; + if (new_delay < dev_priv->ips.max_delay) + new_delay = dev_priv->ips.max_delay; + } else if (busy_down < min_avg) { + if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) + new_delay = dev_priv->ips.cur_delay + 1; + if (new_delay > dev_priv->ips.min_delay) + new_delay = dev_priv->ips.min_delay; + } + + if (ironlake_set_drps(dev, new_delay)) + dev_priv->ips.cur_delay = new_delay; + + spin_unlock_irqrestore(&mchdev_lock, flags); + + return; +} + +static void notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (ring->obj == NULL) + return; + + trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); + + wake_up_all(&ring->irq_queue); + if (i915_enable_hangcheck) { + dev_priv->gpu_error.hangcheck_count = 0; + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + } +} + +static void gen6_pm_rps_work(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + rps.work); + u32 pm_iir, pm_imr; + u8 new_delay; + + spin_lock_irq(&dev_priv->rps.lock); + pm_iir = dev_priv->rps.pm_iir; + dev_priv->rps.pm_iir = 0; + pm_imr = I915_READ(GEN6_PMIMR); + I915_WRITE(GEN6_PMIMR, 0); + spin_unlock_irq(&dev_priv->rps.lock); + + if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) + return; + + mutex_lock(&dev_priv->rps.hw_lock); + + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) + new_delay = dev_priv->rps.cur_delay + 1; + else + new_delay = dev_priv->rps.cur_delay - 1; + + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + if (!(new_delay > dev_priv->rps.max_delay || + new_delay < dev_priv->rps.min_delay)) { + gen6_set_rps(dev_priv->dev, new_delay); + } + + mutex_unlock(&dev_priv->rps.hw_lock); +} + + +/** + * ivybridge_parity_work - Workqueue called when a parity error interrupt + * occurred. + * @work: workqueue struct + * + * Doesn't actually do anything except notify userspace. As a consequence of + * this event, userspace should try to remap the bad rows since statistically + * it is likely the same row is more likely to go bad again. + */ +static void ivybridge_parity_work(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + l3_parity.error_work); + u32 error_status, row, bank, subbank; + char *parity_event[5]; + uint32_t misccpctl; + unsigned long flags; + + /* We must turn off DOP level clock gating to access the L3 registers. + * In order to prevent a get/put style interface, acquire struct mutex + * any time we access those registers. 
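The up/down step in gen6_pm_rps_work() above is easy to misread because the clamp is expressed as "do nothing when out of range". A compact restatement, with invented bit names standing in for the GEN6_PM_RP_*_THRESHOLD flags:

#include <stdint.h>
#include <stdio.h>

#define RP_UP_THRESHOLD   (1u << 0)     /* stand-ins for the GEN6_PM_RP_* bits */
#define RP_DOWN_THRESHOLD (1u << 1)

/* Move the requested delay one step in the direction the hardware asked
 * for, but only apply it if it stays inside the [min, max] window that
 * the sysfs interfaces may have changed in the meantime. */
static uint8_t rps_step(uint8_t cur, uint8_t min, uint8_t max, uint32_t pm_iir)
{
        uint8_t next = (pm_iir & RP_UP_THRESHOLD) ? cur + 1 : cur - 1;

        if (next > max || next < min)
                return cur;             /* out of range: keep the current value */
        return next;
}

int main(void)
{
        printf("%u\n", rps_step(10, 5, 15, RP_UP_THRESHOLD));   /* 11 */
        printf("%u\n", rps_step(15, 5, 15, RP_UP_THRESHOLD));   /* 15, clamped */
        return 0;
}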
+ */ + mutex_lock(&dev_priv->dev->struct_mutex); + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + POSTING_READ(GEN7_MISCCPCTL); + + error_status = I915_READ(GEN7_L3CDERRST1); + row = GEN7_PARITY_ERROR_ROW(error_status); + bank = GEN7_PARITY_ERROR_BANK(error_status); + subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); + + I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | + GEN7_L3CDERRST1_ENABLE); + POSTING_READ(GEN7_L3CDERRST1); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + mutex_unlock(&dev_priv->dev->struct_mutex); + + parity_event[0] = "L3_PARITY_ERROR=1"; + parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); + parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); + parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); + parity_event[4] = NULL; + + kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, + KOBJ_CHANGE, parity_event); + + DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", + row, bank, subbank); + + kfree(parity_event[3]); + kfree(parity_event[2]); + kfree(parity_event[1]); +} + +static void ivybridge_handle_parity_error(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long flags; + + if (!HAS_L3_GPU_CACHE(dev)) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); +} + +static void snb_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + + if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | + GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & GEN6_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); + + if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_RENDER_CS_ERROR_INTERRUPT)) { + DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); + i915_handle_error(dev, false); + } + + if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) + ivybridge_handle_parity_error(dev); +} + +static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, + u32 pm_iir) +{ + unsigned long flags; + + /* + * IIR bits should never already be set because IMR should + * prevent an interrupt from being shown in IIR. The warning + * displays a case where we've unsafely cleared + * dev_priv->rps.pm_iir. Although missing an interrupt of the same + * type is not a problem, it displays a problem in the logic. + * + * The mask bit in IMR is cleared by dev_priv->rps.work. 
+ */ + + spin_lock_irqsave(&dev_priv->rps.lock, flags); + dev_priv->rps.pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); + POSTING_READ(GEN6_PMIMR); + spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + + queue_work(dev_priv->wq, &dev_priv->rps.work); +} + +#define HPD_STORM_DETECT_PERIOD 1000 +#define HPD_STORM_THRESHOLD 5 + +static inline bool hotplug_irq_storm_detect(struct drm_device *dev, + u32 hotplug_trigger, + const u32 *hpd) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + int i; + bool ret = false; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + for (i = 1; i < HPD_NUM_PINS; i++) { + + if (!(hpd[i] & hotplug_trigger) || + dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) + continue; + + if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, + dev_priv->hpd_stats[i].hpd_last_jiffies + + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { + dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; + dev_priv->hpd_stats[i].hpd_cnt = 0; + } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { + dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; + DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); + ret = true; + } else { + dev_priv->hpd_stats[i].hpd_cnt++; + } + } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return ret; +} + +static void gmbus_irq_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + + wake_up_all(&dev_priv->gmbus_wait_queue); +} + +static void dp_aux_irq_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; + + wake_up_all(&dev_priv->gmbus_wait_queue); +} + +static irqreturn_t valleyview_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, gt_iir, pm_iir; + irqreturn_t ret = IRQ_NONE; + unsigned long irqflags; + int pipe; + u32 pipe_stats[I915_MAX_PIPES]; + + atomic_inc(&dev_priv->irq_received); + + while (true) { + iir = I915_READ(VLV_IIR); + gt_iir = I915_READ(GTIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (gt_iir == 0 && pm_iir == 0 && iir == 0) + goto out; + + ret = IRQ_HANDLED; + + snb_gt_irq_handler(dev, dev_priv, gt_iir); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + } + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(dev, pipe); + + if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + } + + /* Consume port. 
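hotplug_irq_storm_detect() above is a simple rate limiter: count events per pin inside a one-second window and give up on the pin once the count passes a threshold. A simplified standalone version, where a plain monotonic millisecond clock replaces jiffies/time_in_range and locking is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000   /* HPD_STORM_DETECT_PERIOD above */
#define STORM_THRESHOLD  5      /* HPD_STORM_THRESHOLD above */

struct hpd_pin {
        uint64_t window_start_ms;
        int count;
        bool disabled;
};

/* Restart the window when the previous event is too old, otherwise count,
 * and mark the pin disabled once the count passes the threshold. */
static bool hpd_event(struct hpd_pin *pin, uint64_t now_ms)
{
        if (pin->disabled)
                return false;

        if (now_ms - pin->window_start_ms > STORM_PERIOD_MS) {
                pin->window_start_ms = now_ms;
                pin->count = 0;
        } else if (pin->count > STORM_THRESHOLD) {
                pin->disabled = true;   /* caller falls back to polling */
                return true;
        } else {
                pin->count++;
        }
        return false;
}

int main(void)
{
        struct hpd_pin pin = { 0 };
        uint64_t t;

        /* A burst of events 1 ms apart trips the detector. */
        for (t = 0; t < 10; t++)
                if (hpd_event(&pin, t))
                        printf("storm detected at t=%llu ms\n",
                               (unsigned long long)t);
        return 0;
}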
Then clear IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_trigger) { + if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) + i915_hpd_irq_setup(dev); + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); + } + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + gmbus_irq_handler(dev); + + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + I915_WRITE(VLV_IIR, iir); + } + +out: + return ret; +} + +static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + + if (hotplug_trigger) { + if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) + ibx_hpd_irq_setup(dev); + queue_work(dev_priv->wq, &dev_priv->hotplug_work); + } + if (pch_iir & SDE_AUDIO_POWER_MASK) + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", + (pch_iir & SDE_AUDIO_POWER_MASK) >> + SDE_AUDIO_POWER_SHIFT); + + if (pch_iir & SDE_AUX_MASK) + dp_aux_irq_handler(dev); + + if (pch_iir & SDE_GMBUS) + gmbus_irq_handler(dev); + + if (pch_iir & SDE_AUDIO_HDCP_MASK) + DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); + + if (pch_iir & SDE_AUDIO_TRANS_MASK) + DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); + + if (pch_iir & SDE_POISON) + DRM_ERROR("PCH poison interrupt\n"); + + if (pch_iir & SDE_FDI_MASK) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); + + if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) + DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); + + if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) + DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); + + if (pch_iir & SDE_TRANSB_FIFO_UNDER) + DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); + if (pch_iir & SDE_TRANSA_FIFO_UNDER) + DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); +} + +static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + + if (hotplug_trigger) { + if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) + ibx_hpd_irq_setup(dev); + queue_work(dev_priv->wq, &dev_priv->hotplug_work); + } + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", + (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> + SDE_AUDIO_POWER_SHIFT_CPT); + + if (pch_iir & SDE_AUX_MASK_CPT) + dp_aux_irq_handler(dev); + + if (pch_iir & SDE_GMBUS_CPT) + gmbus_irq_handler(dev); + + if (pch_iir & SDE_AUDIO_CP_REQ_CPT) + DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_CHG_CPT) + DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); + + if (pch_iir & SDE_FDI_MASK_CPT) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); +} + +static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) 
dev->dev_private; + u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; + irqreturn_t ret = IRQ_NONE; + int i; + + atomic_inc(&dev_priv->irq_received); + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); + I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + + /* Disable south interrupts. We'll only write to SDEIIR once, so further + * interrupts will will be stored on its back queue, and then we'll be + * able to process them after we restore SDEIER (as soon as we restore + * it, we'll get an interrupt if SDEIIR still has something to process + * due to its back queue). */ + if (!HAS_PCH_NOP(dev)) { + sde_ier = I915_READ(SDEIER); + I915_WRITE(SDEIER, 0); + POSTING_READ(SDEIER); + } + + gt_iir = I915_READ(GTIIR); + if (gt_iir) { + snb_gt_irq_handler(dev, dev_priv, gt_iir); + I915_WRITE(GTIIR, gt_iir); + ret = IRQ_HANDLED; + } + + de_iir = I915_READ(DEIIR); + if (de_iir) { + if (de_iir & DE_AUX_CHANNEL_A_IVB) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE_IVB) + intel_opregion_gse_intr(dev); + + for (i = 0; i < 3; i++) { + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + drm_handle_vblank(dev, i); + if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + intel_prepare_page_flip(dev, i); + intel_finish_page_flip_plane(dev, i); + } + } + + /* check event from PCH */ + if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } + + I915_WRITE(DEIIR, de_iir); + ret = IRQ_HANDLED; + } + + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + if (pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = IRQ_HANDLED; + } + + I915_WRITE(DEIER, de_ier); + POSTING_READ(DEIER); + if (!HAS_PCH_NOP(dev)) { + I915_WRITE(SDEIER, sde_ier); + POSTING_READ(SDEIER); + } + + return ret; +} + +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & GT_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); +} + +static irqreturn_t ironlake_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int ret = IRQ_NONE; + u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; + + atomic_inc(&dev_priv->irq_received); + + /* disable master interrupt before clearing iir */ + de_ier = I915_READ(DEIER); + I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + POSTING_READ(DEIER); + + /* Disable south interrupts. We'll only write to SDEIIR once, so further + * interrupts will will be stored on its back queue, and then we'll be + * able to process them after we restore SDEIER (as soon as we restore + * it, we'll get an interrupt if SDEIIR still has something to process + * due to its back queue). 
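The handlers above share one shape: mask the master enable, snapshot the identity register, handle what was latched, acknowledge it, then restore the enable. The toy version below shows only that ordering against a fake register pair; the real code additionally parks SDEIER so PCH events queue up in SDEIIR instead of being lost.

#include <stdint.h>
#include <stdio.h>

/* Fake register pair standing in for DEIER/DEIIR: 'enable' gates delivery,
 * 'pending' latches events. */
struct irq_regs {
        uint32_t enable;
        uint32_t pending;
};

static void toy_irq_handler(struct irq_regs *r)
{
        uint32_t saved = r->enable;
        uint32_t iir;

        r->enable = 0;                  /* disable master interrupt */
        iir = r->pending;               /* snapshot IIR */

        if (iir)
                printf("handling events 0x%08x\n", iir);

        r->pending &= ~iir;             /* acknowledge after handling */
        r->enable = saved;              /* restore master enable */
}

int main(void)
{
        struct irq_regs r = { .enable = ~0u, .pending = 0x5 };

        toy_irq_handler(&r);
        return 0;
}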
*/ + sde_ier = I915_READ(SDEIER); + I915_WRITE(SDEIER, 0); + POSTING_READ(SDEIER); + + de_iir = I915_READ(DEIIR); + gt_iir = I915_READ(GTIIR); + pm_iir = I915_READ(GEN6_PMIIR); + + if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) + goto done; + + ret = IRQ_HANDLED; + + if (IS_GEN5(dev)) + ilk_gt_irq_handler(dev, dev_priv, gt_iir); + else + snb_gt_irq_handler(dev, dev_priv, gt_iir); + + if (de_iir & DE_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE) + intel_opregion_gse_intr(dev); + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + u32 pch_iir = I915_READ(SDEIIR); + + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } + + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_handle_rps_change(dev); + + if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) + gen6_queue_rps_work(dev_priv, pm_iir); + + I915_WRITE(GTIIR, gt_iir); + I915_WRITE(DEIIR, de_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + +done: + I915_WRITE(DEIER, de_ier); + POSTING_READ(DEIER); + I915_WRITE(SDEIER, sde_ier); + POSTING_READ(SDEIER); + + return ret; +} + +static void i915_error_wake_up(struct drm_i915_private *dev_priv, + bool reset_completed) +{ + struct intel_ring_buffer *ring; + int i; + + /* + * Notify all waiters for GPU completion events that reset state has + * been changed, and that they need to restart their wait after + * checking for potential errors (and bail out to drop locks if there is + * a gpu reset pending so that i915_error_work_func can acquire them). + */ + + /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ + for_each_ring(ring, dev_priv, i) + wake_up_all(&ring->irq_queue); + + /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ + wake_up_all(&dev_priv->pending_flip_queue); + + /* + * Signal tasks blocked in i915_gem_wait_for_error that the pending + * reset state is cleared. + */ + if (reset_completed) + wake_up_all(&dev_priv->gpu_error.reset_queue); +} + +/** + * i915_error_work_func - do process context error handling work + * @work: work struct + * + * Fire an error uevent so userspace can see that a hang or error + * was detected. + */ +static void i915_error_work_func(struct work_struct *work) +{ + struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, + work); + drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, + gpu_error); + struct drm_device *dev = dev_priv->dev; + char *error_event[] = { "ERROR=1", NULL }; + char *reset_event[] = { "RESET=1", NULL }; + char *reset_done_event[] = { "ERROR=0", NULL }; + int ret; + + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); + + /* + * Note that there's only one work item which does gpu resets, so we + * need not worry about concurrent gpu resets potentially incrementing + * error->reset_counter twice. We only need to take care of another + * racing irq/hangcheck declaring the gpu dead for a second time. 
A + * quick check for that is good enough: schedule_work ensures the + * correct ordering between hang detection and this work item, and since + * the reset in-progress bit is only ever set by code outside of this + * work we don't need to worry about any other races. + */ + if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, + reset_event); + + /* + * All state reset _must_ be completed before we update the + * reset counter, for otherwise waiters might miss the reset + * pending state and not properly drop locks, resulting in + * deadlocks with the reset work. + */ + ret = i915_reset(dev); + + intel_display_handle_reset(dev); + + if (ret == 0) { + /* + * After all the gem state is reset, increment the reset + * counter and wake up everyone waiting for the reset to + * complete. + * + * Since unlock operations are a one-sided barrier only, + * we need to insert a barrier here to order any seqno + * updates before + * the counter increment. + */ + smp_mb__before_atomic_inc(); + atomic_inc(&dev_priv->gpu_error.reset_counter); + + kobject_uevent_env(&dev->primary->kdev.kobj, + KOBJ_CHANGE, reset_done_event); + } else { + atomic_set(&error->reset_counter, I915_WEDGED); + } + + /* + * Note: The wake_up also serves as a memory barrier so that + * waiters see the update value of the reset counter atomic_t. + */ + i915_error_wake_up(dev_priv, true); + } +} + +/* NB: please notice the memset */ +static void i915_get_extra_instdone(struct drm_device *dev, + uint32_t *instdone) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + + switch(INTEL_INFO(dev)->gen) { + case 2: + case 3: + instdone[0] = I915_READ(INSTDONE); + break; + case 4: + case 5: + case 6: + instdone[0] = I915_READ(INSTDONE_I965); + instdone[1] = I915_READ(INSTDONE1); + break; + default: + WARN_ONCE(1, "Unsupported platform\n"); + case 7: + instdone[0] = I915_READ(GEN7_INSTDONE_1); + instdone[1] = I915_READ(GEN7_SC_INSTDONE); + instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); + instdone[3] = I915_READ(GEN7_ROW_INSTDONE); + break; + } +} + +#ifdef CONFIG_DEBUG_FS +static struct drm_i915_error_object * +i915_error_object_create_sized(struct drm_i915_private *dev_priv, + struct drm_i915_gem_object *src, + const int num_pages) +{ + struct drm_i915_error_object *dst; + int i; + u32 reloc_offset; + + if (src == NULL || src->pages == NULL) + return NULL; + + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); + if (dst == NULL) + return NULL; + + reloc_offset = src->gtt_offset; + for (i = 0; i < num_pages; i++) { + unsigned long flags; + void *d; + + d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (d == NULL) + goto unwind; + + local_irq_save(flags); + if (reloc_offset < dev_priv->gtt.mappable_end && + src->has_global_gtt_mapping) { + void __iomem *s; + + /* Simply ignore tiling or any overlapping fence. + * It's part of the error state, and this hopefully + * captures what the GPU read. 
+ */ + + s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc_offset); + memcpy_fromio(d, s, PAGE_SIZE); + io_mapping_unmap_atomic(s); + } else if (src->stolen) { + unsigned long offset; + + offset = dev_priv->mm.stolen_base; + offset += src->stolen->start; + offset += i << PAGE_SHIFT; + + memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); + } else { + struct page *page; + void *s; + + page = i915_gem_object_get_page(src, i); + + drm_clflush_pages(&page, 1); + + s = kmap_atomic(page); + memcpy(d, s, PAGE_SIZE); + kunmap_atomic(s); + + drm_clflush_pages(&page, 1); + } + local_irq_restore(flags); + + dst->pages[i] = d; + + reloc_offset += PAGE_SIZE; + } + dst->page_count = num_pages; + dst->gtt_offset = src->gtt_offset; + + return dst; + +unwind: + while (i--) + kfree(dst->pages[i]); + kfree(dst); + return NULL; +} +#define i915_error_object_create(dev_priv, src) \ + i915_error_object_create_sized((dev_priv), (src), \ + (src)->base.size>>PAGE_SHIFT) + +static void +i915_error_object_free(struct drm_i915_error_object *obj) +{ + int page; + + if (obj == NULL) + return; + + for (page = 0; page < obj->page_count; page++) + kfree(obj->pages[page]); + + kfree(obj); +} + +void +i915_error_state_free(struct kref *error_ref) +{ + struct drm_i915_error_state *error = container_of(error_ref, + typeof(*error), ref); + int i; + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + i915_error_object_free(error->ring[i].batchbuffer); + i915_error_object_free(error->ring[i].ringbuffer); + kfree(error->ring[i].requests); + } + + kfree(error->active_bo); + kfree(error->overlay); + kfree(error); +} +static void capture_bo(struct drm_i915_error_buffer *err, + struct drm_i915_gem_object *obj) +{ + err->size = obj->base.size; + err->name = obj->base.name; + err->rseqno = obj->last_read_seqno; + err->wseqno = obj->last_write_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? 
obj->ring->id : -1; + err->cache_level = obj->cache_level; +} + +static u32 capture_active_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, gtt_list) { + if (obj->pin_count == 0) + continue; + + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < dev_priv->num_fence_regs; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + default: + BUG(); + } +} + +static struct drm_i915_error_object * +i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + u32 seqno; + + if (!ring->get_seqno) + return NULL; + + if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { + u32 acthd = I915_READ(ACTHD); + + if (WARN_ON(ring->id != RCS)) + return NULL; + + obj = ring->private; + if (acthd >= obj->gtt_offset && + acthd < obj->gtt_offset + obj->base.size) + return i915_error_object_create(dev_priv, obj); + } + + seqno = ring->get_seqno(ring, false); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->ring != ring) + continue; + + if (i915_seqno_passed(seqno, obj->last_read_seqno)) + continue; + + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) + continue; + + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userspace. 
+ */ + return i915_error_object_create(dev_priv, obj); + } + + return NULL; +} + +static void i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 6) { + error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); + error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); + error->semaphore_mboxes[ring->id][0] + = I915_READ(RING_SYNC_0(ring->mmio_base)); + error->semaphore_mboxes[ring->id][1] + = I915_READ(RING_SYNC_1(ring->mmio_base)); + error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; + error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; + } + + if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); + error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); + error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); + error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); + error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); + if (ring->id == RCS) + error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); + error->ipeir[ring->id] = I915_READ(IPEIR); + error->ipehr[ring->id] = I915_READ(IPEHR); + error->instdone[ring->id] = I915_READ(INSTDONE); + } + + error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); + error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); + error->seqno[ring->id] = ring->get_seqno(ring, false); + error->acthd[ring->id] = intel_ring_get_active_head(ring); + error->head[ring->id] = I915_READ_HEAD(ring); + error->tail[ring->id] = I915_READ_TAIL(ring); + error->ctl[ring->id] = I915_READ_CTL(ring); + + error->cpu_ring_head[ring->id] = ring->head; + error->cpu_ring_tail[ring->id] = ring->tail; +} + + +static void i915_gem_record_active_context(struct intel_ring_buffer *ring, + struct drm_i915_error_state *error, + struct drm_i915_error_ring *ering) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj; + + /* Currently render ring is the only HW context user */ + if (ring->id != RCS || !error->ccid) + return; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { + ering->ctx = i915_error_object_create_sized(dev_priv, + obj, 1); + } + } +} + +static void i915_gem_record_rings(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct drm_i915_gem_request *request; + int i, count; + + for_each_ring(ring, dev_priv, i) { + i915_record_ring_state(dev, error, ring); + + error->ring[i].batchbuffer = + i915_error_first_batchbuffer(dev_priv, ring); + + error->ring[i].ringbuffer = + i915_error_object_create(dev_priv, ring->obj); + + + i915_gem_record_active_context(ring, error, &error->ring[i]); + + count = 0; + list_for_each_entry(request, &ring->request_list, list) + count++; + + error->ring[i].num_requests = count; + error->ring[i].requests = + kmalloc(count*sizeof(struct drm_i915_error_request), + GFP_ATOMIC); + if (error->ring[i].requests == NULL) { + error->ring[i].num_requests = 0; + continue; + } + + count = 0; + list_for_each_entry(request, &ring->request_list, list) { + struct drm_i915_error_request *erq; + + erq = &error->ring[i].requests[count++]; + erq->seqno = request->seqno; + erq->jiffies = request->emitted_jiffies; + 
erq->tail = request->tail; + } + } +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. + */ +static void i915_capture_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct drm_i915_error_state *error; + unsigned long flags; + int i, pipe; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + if (error) + return; + + /* Account for pipe specific data like PIPE*STAT */ + error = kzalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + return; + } + + DRM_INFO("capturing error event; look for more information in " + "/sys/kernel/debug/dri/%d/i915_error_state\n", + dev->primary->index); + + kref_init(&error->ref); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + if (HAS_HW_CONTEXTS(dev)) + error->ccid = I915_READ(CCID); + + if (HAS_PCH_SPLIT(dev)) + error->ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(dev)) + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + else if (IS_GEN2(dev)) + error->ier = I915_READ16(IER); + else + error->ier = I915_READ(IER); + + if (INTEL_INFO(dev)->gen >= 6) + error->derrmr = I915_READ(DERRMR); + + if (IS_VALLEYVIEW(dev)) + error->forcewake = I915_READ(FORCEWAKE_VLV); + else if (INTEL_INFO(dev)->gen >= 7) + error->forcewake = I915_READ(FORCEWAKE_MT); + else if (INTEL_INFO(dev)->gen == 6) + error->forcewake = I915_READ(FORCEWAKE); + + if (!HAS_PCH_SPLIT(dev)) + for_each_pipe(pipe) + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); + + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + error->done_reg = I915_READ(DONE_REG); + } + + if (INTEL_INFO(dev)->gen == 7) + error->err_int = I915_READ(GEN7_ERR_INT); + + i915_get_extra_instdone(dev, error->extra_instdone); + + i915_gem_record_fences(dev, error); + i915_gem_record_rings(dev, error); + + /* Record buffers on the active and pinned lists. 
*/ + error->active_bo = NULL; + error->pinned_bo = NULL; + + i = 0; + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) + i++; + error->active_bo_count = i; + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + if (obj->pin_count) + i++; + error->pinned_bo_count = i - error->active_bo_count; + + error->active_bo = NULL; + error->pinned_bo = NULL; + if (i) { + error->active_bo = kmalloc(sizeof(*error->active_bo)*i, + GFP_ATOMIC); + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; + } + + if (error->active_bo) + error->active_bo_count = + capture_active_bo(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_pinned_bo(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.bound_list); + + do_gettimeofday(&error->time); + + error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + if (dev_priv->gpu_error.first_error == NULL) { + dev_priv->gpu_error.first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + i915_error_state_free(&error->ref); +} + +void i915_destroy_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + dev_priv->gpu_error.first_error = NULL; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + kref_put(&error->ref, i915_error_state_free); +} +#else +#define i915_capture_error_state(x) +#endif + +static void i915_report_and_clear_eir(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t instdone[I915_NUM_INSTDONE_REG]; + u32 eir = I915_READ(EIR); + int pipe, i; + + if (!eir) + return; + + pr_err("render error detected, EIR: 0x%08x\n", eir); + + i915_get_extra_instdone(dev, instdone); + + if (IS_G4X(dev)) { + if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { + u32 ipeir = I915_READ(IPEIR_I965); + + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + POSTING_READ(IPEIR_I965); + } + if (eir & GM45_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + POSTING_READ(PGTBL_ER); + } + } + + if (!IS_GEN2(dev)) { + if (eir & I915_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + pr_err("page table error\n"); + pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + POSTING_READ(PGTBL_ER); + } + } + + if (eir & I915_ERROR_MEMORY_REFRESH) { + pr_err("memory refresh error:\n"); + for_each_pipe(pipe) + pr_err("pipe %c stat: 0x%08x\n", + pipe_name(pipe), I915_READ(PIPESTAT(pipe))); + /* pipestat has already been acked */ + } + if (eir & I915_ERROR_INSTRUCTION) { + pr_err("instruction error\n"); + pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); + if (INTEL_INFO(dev)->gen < 4) { + 
u32 ipeir = I915_READ(IPEIR); + + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); + I915_WRITE(IPEIR, ipeir); + POSTING_READ(IPEIR); + } else { + u32 ipeir = I915_READ(IPEIR_I965); + + pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); + pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + POSTING_READ(IPEIR_I965); + } + } + + I915_WRITE(EIR, eir); + POSTING_READ(EIR); + eir = I915_READ(EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + } +} + +/** + * i915_handle_error - handle an error interrupt + * @dev: drm device + * + * Do some basic checking of regsiter state at error interrupt time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). + */ +void i915_handle_error(struct drm_device *dev, bool wedged) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + i915_capture_error_state(dev); + i915_report_and_clear_eir(dev); + + if (wedged) { + atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, + &dev_priv->gpu_error.reset_counter); + + /* + * Wakeup waiting processes so that the reset work function + * i915_error_work_func doesn't deadlock trying to grab various + * locks. By bumping the reset counter first, the woken + * processes will see a reset in progress and back off, + * releasing their locks and then wait for the reset completion. + * We must do this for _all_ gpu waiters that might hold locks + * that the reset work needs to acquire. + * + * Note: The wake_up serves as the required memory barrier to + * ensure that the waiters see the updated value of the reset + * counter atomic_t. + */ + i915_error_wake_up(dev_priv, false); + } + + /* + * Our reset work can grab modeset locks (since it needs to reset the + * state of outstanding pagelips). Hence it must not be run on our own + * dev-priv->wq work queue for otherwise the flush_work in the pageflip + * code will deadlock. + */ + schedule_work(&dev_priv->gpu_error.work); +} + +static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj; + struct intel_unpin_work *work; + unsigned long flags; + bool stall_detected; + + /* Ignore early vblank irqs */ + if (intel_crtc == NULL) + return; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + + if (work == NULL || + atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || + !work->enable_stall_check) { + /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } + + /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ + obj = work->pending_flip_obj; + if (INTEL_INFO(dev)->gen >= 4) { + int dspsurf = DSPSURF(intel_crtc->plane); + stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == + obj->gtt_offset; + } else { + int dspaddr = DSPADDR(intel_crtc->plane); + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + + crtc->y * crtc->fb->pitches[0] + + crtc->x * crtc->fb->bits_per_pixel/8); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (stall_detected) { + DRM_DEBUG_DRIVER("Pageflip stall detected\n"); + intel_prepare_page_flip(dev, intel_crtc->plane); + } +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +static int i915_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (INTEL_INFO(dev)->gen >= 4) + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + else + i915_enable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE); + + /* maintain vblank delivery even in deep C-states */ + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +static int ironlake_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (5 * pipe)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +static int valleyview_enable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + u32 imr; + + if (!i915_pipe_enabled(dev, pipe)) + return -EINVAL; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + imr = I915_READ(VLV_IMR); + if (pipe == 0) + imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + else + imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + I915_WRITE(VLV_IMR, imr); + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + return 0; +} + +/* Called from drm generic code, passed 'crtc' which + * we use as a pipe index + */ +static void i915_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); + + i915_disable_pipestat(dev_priv, pipe, + PIPE_VBLANK_INTERRUPT_ENABLE | + PIPE_START_VBLANK_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +static void ironlake_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_disable_display_irq(dev_priv, + DE_PIPEA_VBLANK_IVB << (pipe * 5)); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +static void valleyview_disable_vblank(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + u32 imr; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); + imr = I915_READ(VLV_IMR); + if (pipe == 0) + imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; + else + imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + I915_WRITE(VLV_IMR, imr); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +static u32 +ring_last_seqno(struct intel_ring_buffer *ring) +{ + return list_entry(ring->request_list.prev, + struct drm_i915_gem_request, list)->seqno; +} + +static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +{ + if (list_empty(&ring->request_list) || + i915_seqno_passed(ring->get_seqno(ring, false), + ring_last_seqno(ring))) { + /* Issue a wake-up to catch stuck h/w. 
*/ + if (waitqueue_active(&ring->irq_queue)) { + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + ring->name); + wake_up_all(&ring->irq_queue); + *err = true; + } + return true; + } + return false; +} + +static bool semaphore_passed(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; + struct intel_ring_buffer *signaller; + u32 cmd, ipehr, acthd_min; + + ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); + if ((ipehr & ~(0x3 << 16)) != + (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) + return false; + + /* ACTHD is likely pointing to the dword after the actual command, + * so scan backwards until we find the MBOX. + */ + acthd_min = max((int)acthd - 3 * 4, 0); + do { + cmd = ioread32(ring->virtual_start + acthd); + if (cmd == ipehr) + break; + + acthd -= 4; + if (acthd < acthd_min) + return false; + } while (1); + + signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; + return i915_seqno_passed(signaller->get_seqno(signaller, false), + ioread32(ring->virtual_start+acthd+4)+1); +} + +static bool kick_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + + if (INTEL_INFO(dev)->gen >= 6 && + tmp & RING_WAIT_SEMAPHORE && + semaphore_passed(ring)) { + DRM_ERROR("Kicking stuck semaphore on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + return false; +} + +static bool i915_hangcheck_hung(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (dev_priv->gpu_error.hangcheck_count++ > 1) { + bool hung = true; + + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + i915_handle_error(dev, true); + + if (!IS_GEN2(dev)) { + struct intel_ring_buffer *ring; + int i; + + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + for_each_ring(ring, dev_priv, i) + hung &= !kick_ring(ring); + } + + return hung; + } + + return false; +} + +/** + * This is called when the chip hasn't reported back with completed + * batchbuffers in a long time. The first time this is called we simply record + * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses + * again, we assume the chip is wedged and try to fix it. + */ +void i915_hangcheck_elapsed(unsigned long data) +{ + struct drm_device *dev = (struct drm_device *)data; + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; + struct intel_ring_buffer *ring; + bool err = false, idle; + int i; + + if (!i915_enable_hangcheck) + return; + + memset(acthd, 0, sizeof(acthd)); + idle = true; + for_each_ring(ring, dev_priv, i) { + idle &= i915_hangcheck_ring_idle(ring, &err); + acthd[i] = intel_ring_get_active_head(ring); + } + + /* If all work is done then ACTHD clearly hasn't advanced. 
*/ + if (idle) { + if (err) { + if (i915_hangcheck_hung(dev)) + return; + + goto repeat; + } + + dev_priv->gpu_error.hangcheck_count = 0; + return; + } + + i915_get_extra_instdone(dev, instdone); + if (memcmp(dev_priv->gpu_error.last_acthd, acthd, + sizeof(acthd)) == 0 && + memcmp(dev_priv->gpu_error.prev_instdone, instdone, + sizeof(instdone)) == 0) { + if (i915_hangcheck_hung(dev)) + return; + } else { + dev_priv->gpu_error.hangcheck_count = 0; + + memcpy(dev_priv->gpu_error.last_acthd, acthd, + sizeof(acthd)); + memcpy(dev_priv->gpu_error.prev_instdone, instdone, + sizeof(instdone)); + } + +repeat: + /* Reset timer case chip hangs without another request being added */ + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); +} + +/* drm_dma.h hooks +*/ +static void ironlake_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + atomic_set(&dev_priv->irq_received, 0); + + I915_WRITE(HWSTAM, 0xeffe); + + /* XXX hotplug from PCH */ + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + POSTING_READ(DEIER); + + /* and GT */ + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + POSTING_READ(GTIER); + + if (HAS_PCH_NOP(dev)) + return; + + /* south display irq */ + I915_WRITE(SDEIMR, 0xffffffff); + /* + * SDEIER is also touched by the interrupt handler to work around missed + * PCH interrupts. Hence we can't update it after the interrupt handler + * is enabled - instead we unconditionally enable all PCH interrupt + * sources here, but then only unmask them as needed with SDEIMR. + */ + I915_WRITE(SDEIER, 0xffffffff); + POSTING_READ(SDEIER); +} + +static void valleyview_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + /* VLV magic */ + I915_WRITE(VLV_IMR, 0); + I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); + I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); + I915_WRITE(RING_IMR(BLT_RING_BASE), 0); + + /* and GT */ + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + POSTING_READ(GTIER); + + I915_WRITE(DPINVGTT, 0xff); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + +static void ibx_hpd_irq_setup(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *intel_encoder; + u32 mask = ~I915_READ(SDEIMR); + u32 hotplug; + + if (HAS_PCH_IBX(dev)) { + mask &= ~SDE_HOTPLUG_MASK; + list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) + mask |= hpd_ibx[intel_encoder->hpd_pin]; + } else { + mask &= ~SDE_HOTPLUG_MASK_CPT; + list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) + mask |= hpd_cpt[intel_encoder->hpd_pin]; + } + + I915_WRITE(SDEIMR, ~mask); + + /* + * Enable digital hotplug on the PCH, and configure the DP short pulse + * duration to 2ms (which is the minimum in the Display Port spec) + * + * This register is the 
same on all known PCH chips. + */ + hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); + hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; + hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; + hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; + I915_WRITE(PCH_PORT_HOTPLUG, hotplug); +} + +static void ibx_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 mask; + + if (HAS_PCH_IBX(dev)) + mask = SDE_GMBUS | SDE_AUX_MASK; + else + mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; + + if (HAS_PCH_NOP(dev)) + return; + + I915_WRITE(SDEIIR, I915_READ(SDEIIR)); + I915_WRITE(SDEIMR, ~mask); +} + +static int ironlake_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | + DE_AUX_CHANNEL_A; + u32 render_irqs; + + dev_priv->irq_mask = ~display_mask; + + /* should always can generate irq */ + I915_WRITE(DEIIR, I915_READ(DEIIR)); + I915_WRITE(DEIMR, dev_priv->irq_mask); + I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); + POSTING_READ(DEIER); + + dev_priv->gt_irq_mask = ~0; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + + if (IS_GEN6(dev)) + render_irqs = + GT_USER_INTERRUPT | + GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; + else + render_irqs = + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY | + GT_BSD_USER_INTERRUPT; + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); + + ibx_irq_postinstall(dev); + + if (IS_IRONLAKE_M(dev)) { + /* Clear & enable PCU event interrupts */ + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); + ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); + } + + return 0; +} + +static int ivybridge_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + /* enable kind of interrupts always enabled */ + u32 display_mask = + DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | + DE_PLANEC_FLIP_DONE_IVB | + DE_PLANEB_FLIP_DONE_IVB | + DE_PLANEA_FLIP_DONE_IVB | + DE_AUX_CHANNEL_A_IVB; + u32 render_irqs; + + dev_priv->irq_mask = ~display_mask; + + /* should always can generate irq */ + I915_WRITE(DEIIR, I915_READ(DEIIR)); + I915_WRITE(DEIMR, dev_priv->irq_mask); + I915_WRITE(DEIER, + display_mask | + DE_PIPEC_VBLANK_IVB | + DE_PIPEB_VBLANK_IVB | + DE_PIPEA_VBLANK_IVB); + POSTING_READ(DEIER); + + dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + + render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); + + ibx_irq_postinstall(dev); + + return 0; +} + +static int valleyview_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 enable_mask; + u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; + u32 render_irqs; + u16 msid; + + enable_mask = I915_DISPLAY_PORT_INTERRUPT; + enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + 
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + /* + *Leave vblank interrupts masked initially. enable/disable will + * toggle them based on usage. + */ + dev_priv->irq_mask = (~enable_mask) | + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + /* Hack for broken MSIs on VLV */ + pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); + pci_read_config_word(dev->pdev, 0x98, &msid); + msid &= 0xff; /* mask out delivery bits */ + msid |= (1<<14); + pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + POSTING_READ(PORT_HOTPLUG_EN); + + I915_WRITE(VLV_IMR, dev_priv->irq_mask); + I915_WRITE(VLV_IER, enable_mask); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(PIPESTAT(0), 0xffff); + I915_WRITE(PIPESTAT(1), 0xffff); + POSTING_READ(VLV_IER); + + i915_enable_pipestat(dev_priv, 0, pipestat_enable); + i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + i915_enable_pipestat(dev_priv, 1, pipestat_enable); + + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IIR, 0xffffffff); + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + + render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | + GEN6_BLITTER_USER_INTERRUPT; + I915_WRITE(GTIER, render_irqs); + POSTING_READ(GTIER); + + /* ack & enable invalid PTE error interrupts */ +#if 0 /* FIXME: add support to irq handler for checking these bits */ + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); +#endif + + I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); + + return 0; +} + +static void valleyview_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + del_timer_sync(&dev_priv->hotplug_reenable_timer); + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + + I915_WRITE(HWSTAM, 0xffffffff); + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0xffff); + I915_WRITE(VLV_IIR, 0xffffffff); + I915_WRITE(VLV_IMR, 0xffffffff); + I915_WRITE(VLV_IER, 0x0); + POSTING_READ(VLV_IER); +} + +static void ironlake_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + if (!dev_priv) + return; + + del_timer_sync(&dev_priv->hotplug_reenable_timer); + + I915_WRITE(HWSTAM, 0xffffffff); + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + I915_WRITE(DEIIR, I915_READ(DEIIR)); + + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + I915_WRITE(GTIIR, I915_READ(GTIIR)); + + if (HAS_PCH_NOP(dev)) + return; + + I915_WRITE(SDEIMR, 0xffffffff); + I915_WRITE(SDEIER, 0x0); + I915_WRITE(SDEIIR, I915_READ(SDEIIR)); +} + +static void i8xx_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE16(IMR, 0xffff); + I915_WRITE16(IER, 0x0); + POSTING_READ16(IER); +} + +static int i8xx_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + I915_WRITE16(EMR, + ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + + /* Unmask the interrupts that we always want on. 
*/ + dev_priv->irq_mask = + ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_WRITE16(IMR, dev_priv->irq_mask); + + I915_WRITE16(IER, + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_USER_INTERRUPT); + POSTING_READ16(IER); + + return 0; +} + +/* + * Returns true when a page flip has completed. + */ +static bool i8xx_handle_vblank(struct drm_device *dev, + int pipe, u16 iir) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); + + if (!drm_handle_vblank(dev, pipe)) + return false; + + if ((iir & flip_pending) == 0) + return false; + + intel_prepare_page_flip(dev, pipe); + + /* We detect FlipDone by looking for the change in PendingFlip from '1' + * to '0' on the following vblank, i.e. IIR has the Pendingflip + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence + * the flip is completed (no longer pending). Since this doesn't raise + * an interrupt per se, we watch for the change at vblank. + */ + if (I915_READ16(ISR) & flip_pending) + return false; + + intel_finish_page_flip(dev, pipe); + + return true; +} + +static irqreturn_t i8xx_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u16 iir, new_iir; + u32 pipe_stats[2]; + unsigned long irqflags; + int irq_received; + int pipe; + u16 flip_mask = + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + + atomic_inc(&dev_priv->irq_received); + + iir = I915_READ16(IIR); + if (iir == 0) + return IRQ_NONE; + + while (iir & ~flip_mask) { + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). 
+ */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); + + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = 1; + } + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + I915_WRITE16(IIR, iir & ~flip_mask); + new_iir = I915_READ16(IIR); /* Flush posted writes */ + + i915_update_dri1_breadcrumb(dev); + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[RCS]); + + if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && + i8xx_handle_vblank(dev, 0, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); + + if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && + i8xx_handle_vblank(dev, 1, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); + + iir = new_iir; + } + + return IRQ_HANDLED; +} + +static void i8xx_irq_uninstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + for_each_pipe(pipe) { + /* Clear enable bits; then clear status bits */ + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); + } + I915_WRITE16(IMR, 0xffff); + I915_WRITE16(IER, 0x0); + I915_WRITE16(IIR, I915_READ16(IIR)); +} + +static void i915_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + + I915_WRITE16(HWSTAM, 0xeffe); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + POSTING_READ(IER); +} + +static int i915_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 enable_mask; + + I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + + /* Unmask the interrupts that we always want on. */ + dev_priv->irq_mask = + ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + + enable_mask = + I915_ASLE_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_USER_INTERRUPT; + + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + POSTING_READ(PORT_HOTPLUG_EN); + + /* Enable in IER... */ + enable_mask |= I915_DISPLAY_PORT_INTERRUPT; + /* and unmask in IMR */ + dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; + } + + I915_WRITE(IMR, dev_priv->irq_mask); + I915_WRITE(IER, enable_mask); + POSTING_READ(IER); + + intel_opregion_enable_asle(dev); + + return 0; +} + +/* + * Returns true when a page flip has completed. 
+ */ +static bool i915_handle_vblank(struct drm_device *dev, + int plane, int pipe, u32 iir) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); + + if (!drm_handle_vblank(dev, pipe)) + return false; + + if ((iir & flip_pending) == 0) + return false; + + intel_prepare_page_flip(dev, plane); + + /* We detect FlipDone by looking for the change in PendingFlip from '1' + * to '0' on the following vblank, i.e. IIR has the Pendingflip + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence + * the flip is completed (no longer pending). Since this doesn't raise + * an interrupt per se, we watch for the change at vblank. + */ + if (I915_READ(ISR) & flip_pending) + return false; + + intel_finish_page_flip(dev, pipe); + + return true; +} + +static irqreturn_t i915_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; + unsigned long irqflags; + u32 flip_mask = + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + int pipe, ret = IRQ_NONE; + + atomic_inc(&dev_priv->irq_received); + + iir = I915_READ(IIR); + do { + bool irq_received = (iir & ~flip_mask) != 0; + bool blc_event = false; + + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). + */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); + + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* Clear the PIPE*STAT regs before the IIR */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = true; + } + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + if (!irq_received) + break; + + /* Consume port. Then clear IIR or we'll miss events */ + if ((I915_HAS_HOTPLUG(dev)) && + (iir & I915_DISPLAY_PORT_INTERRUPT)) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_trigger) { + if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) + i915_hpd_irq_setup(dev); + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); + } + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + POSTING_READ(PORT_HOTPLUG_STAT); + } + + I915_WRITE(IIR, iir & ~flip_mask); + new_iir = I915_READ(IIR); /* Flush posted writes */ + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[RCS]); + + for_each_pipe(pipe) { + int plane = pipe; + if (IS_MOBILE(dev)) + plane = !plane; + + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && + i915_handle_vblank(dev, plane, pipe, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev); + + /* With MSI, interrupts are only generated when iir + * transitions from zero to nonzero. 
If another bit got + * set while we were handling the existing iir bits, then + * we would never get another interrupt. + * + * This is fine on non-MSI as well, as if we hit this path + * we avoid exiting the interrupt handler only to generate + * another one. + * + * Note that for MSI this could cause a stray interrupt report + * if an interrupt landed in the time between writing IIR and + * the posting read. This should be rare enough to never + * trigger the 99% of 100,000 interrupts test for disabling + * stray interrupts. + */ + ret = IRQ_HANDLED; + iir = new_iir; + } while (iir & ~flip_mask); + + i915_update_dri1_breadcrumb(dev); + + return ret; +} + +static void i915_irq_uninstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + del_timer_sync(&dev_priv->hotplug_reenable_timer); + + if (I915_HAS_HOTPLUG(dev)) { + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + } + + I915_WRITE16(HWSTAM, 0xffff); + for_each_pipe(pipe) { + /* Clear enable bits; then clear status bits */ + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); + } + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + + I915_WRITE(IIR, I915_READ(IIR)); +} + +static void i965_irq_preinstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + atomic_set(&dev_priv->irq_received, 0); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + + I915_WRITE(HWSTAM, 0xeffe); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + POSTING_READ(IER); +} + +static int i965_irq_postinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 enable_mask; + u32 error_mask; + + /* Unmask the interrupts that we always want on. */ + dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + + enable_mask = ~dev_priv->irq_mask; + enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); + enable_mask |= I915_USER_INTERRUPT; + + if (IS_G4X(dev)) + enable_mask |= I915_BSD_USER_INTERRUPT; + + i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + + /* + * Enable some error detection, note the instruction error mask + * bit is reserved, so we leave it masked. 
+ */ + if (IS_G4X(dev)) { + error_mask = ~(GM45_ERROR_PAGE_TABLE | + GM45_ERROR_MEM_PRIV | + GM45_ERROR_CP_PRIV | + I915_ERROR_MEMORY_REFRESH); + } else { + error_mask = ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); + } + I915_WRITE(EMR, error_mask); + + I915_WRITE(IMR, dev_priv->irq_mask); + I915_WRITE(IER, enable_mask); + POSTING_READ(IER); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + POSTING_READ(PORT_HOTPLUG_EN); + + intel_opregion_enable_asle(dev); + + return 0; +} + +static void i915_hpd_irq_setup(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *intel_encoder; + u32 hotplug_en; + + if (I915_HAS_HOTPLUG(dev)) { + hotplug_en = I915_READ(PORT_HOTPLUG_EN); + hotplug_en &= ~HOTPLUG_INT_EN_MASK; + /* Note HDMI and DP share hotplug bits */ + /* enable bits are the same for all generations */ + list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) + hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; + /* Programming the CRT detection parameters tends + to generate a spurious hotplug event about three + seconds later. So just do it once. + */ + if (IS_G4X(dev)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + + /* Ignore TV since it's buggy */ + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + } +} + +static irqreturn_t i965_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 iir, new_iir; + u32 pipe_stats[I915_MAX_PIPES]; + unsigned long irqflags; + int irq_received; + int ret = IRQ_NONE, pipe; + u32 flip_mask = + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + + atomic_inc(&dev_priv->irq_received); + + iir = I915_READ(IIR); + + for (;;) { + bool blc_event = false; + + irq_received = (iir & ~flip_mask) != 0; + + /* Can't rely on pipestat interrupt bit in iir as it might + * have been cleared after the pipestat interrupt was received. + * It doesn't set the bit in iir again, but it still produces + * interrupts (for non-MSI). + */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev, false); + + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = 1; + } + } + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + if (!irq_received) + break; + + ret = IRQ_HANDLED; + + /* Consume port. Then clear IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) { + u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? + HOTPLUG_INT_STATUS_G4X : + HOTPLUG_INT_STATUS_I915); + + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", + hotplug_status); + if (hotplug_trigger) { + if (hotplug_irq_storm_detect(dev, hotplug_trigger, + IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915)) + i915_hpd_irq_setup(dev); + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); + } + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + I915_READ(PORT_HOTPLUG_STAT); + } + + I915_WRITE(IIR, iir & ~flip_mask); + new_iir = I915_READ(IIR); /* Flush posted writes */ + + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); + + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && + i915_handle_vblank(dev, pipe, pipe, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + } + + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev); + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + gmbus_irq_handler(dev); + + /* With MSI, interrupts are only generated when iir + * transitions from zero to nonzero. If another bit got + * set while we were handling the existing iir bits, then + * we would never get another interrupt. + * + * This is fine on non-MSI as well, as if we hit this path + * we avoid exiting the interrupt handler only to generate + * another one. + * + * Note that for MSI this could cause a stray interrupt report + * if an interrupt landed in the time between writing IIR and + * the posting read. This should be rare enough to never + * trigger the 99% of 100,000 interrupts test for disabling + * stray interrupts. + */ + iir = new_iir; + } + + i915_update_dri1_breadcrumb(dev); + + return ret; +} + +static void i965_irq_uninstall(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (!dev_priv) + return; + + del_timer_sync(&dev_priv->hotplug_reenable_timer); + + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + + I915_WRITE(HWSTAM, 0xffffffff); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); + I915_WRITE(IMR, 0xffffffff); + I915_WRITE(IER, 0x0); + + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), + I915_READ(PIPESTAT(pipe)) & 0x8000ffff); + I915_WRITE(IIR, I915_READ(IIR)); +} + +static void i915_reenable_hotplug_timer_func(unsigned long data) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; + struct drm_device *dev = dev_priv->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + unsigned long irqflags; + int i; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { + struct drm_connector *connector; + + if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) + continue; + + dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; + + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (intel_connector->encoder->hpd_pin == i) { + if (connector->polled != intel_connector->polled) + DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", + drm_get_connector_name(connector)); + connector->polled = intel_connector->polled; + if (!connector->polled) + connector->polled = DRM_CONNECTOR_POLL_HPD; + } + } + } + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +void intel_irq_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + 
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); + INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); + INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + + setup_timer(&dev_priv->gpu_error.hangcheck_timer, + i915_hangcheck_elapsed, + (unsigned long) dev); + setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, + (unsigned long) dev_priv); + + pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + + dev->driver->get_vblank_counter = i915_get_vblank_counter; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ + dev->driver->get_vblank_counter = gm45_get_vblank_counter; + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; + else + dev->driver->get_vblank_timestamp = NULL; + dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; + + if (IS_VALLEYVIEW(dev)) { + dev->driver->irq_handler = valleyview_irq_handler; + dev->driver->irq_preinstall = valleyview_irq_preinstall; + dev->driver->irq_postinstall = valleyview_irq_postinstall; + dev->driver->irq_uninstall = valleyview_irq_uninstall; + dev->driver->enable_vblank = valleyview_enable_vblank; + dev->driver->disable_vblank = valleyview_disable_vblank; + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + /* Share pre & uninstall handlers with ILK/SNB */ + dev->driver->irq_handler = ivybridge_irq_handler; + dev->driver->irq_preinstall = ironlake_irq_preinstall; + dev->driver->irq_postinstall = ivybridge_irq_postinstall; + dev->driver->irq_uninstall = ironlake_irq_uninstall; + dev->driver->enable_vblank = ivybridge_enable_vblank; + dev->driver->disable_vblank = ivybridge_disable_vblank; + dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; + } else if (HAS_PCH_SPLIT(dev)) { + dev->driver->irq_handler = ironlake_irq_handler; + dev->driver->irq_preinstall = ironlake_irq_preinstall; + dev->driver->irq_postinstall = ironlake_irq_postinstall; + dev->driver->irq_uninstall = ironlake_irq_uninstall; + dev->driver->enable_vblank = ironlake_enable_vblank; + dev->driver->disable_vblank = ironlake_disable_vblank; + dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; + } else { + if (INTEL_INFO(dev)->gen == 2) { + dev->driver->irq_preinstall = i8xx_irq_preinstall; + dev->driver->irq_postinstall = i8xx_irq_postinstall; + dev->driver->irq_handler = i8xx_irq_handler; + dev->driver->irq_uninstall = i8xx_irq_uninstall; + } else if (INTEL_INFO(dev)->gen == 3) { + dev->driver->irq_preinstall = i915_irq_preinstall; + dev->driver->irq_postinstall = i915_irq_postinstall; + dev->driver->irq_uninstall = i915_irq_uninstall; + dev->driver->irq_handler = i915_irq_handler; + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else { + dev->driver->irq_preinstall = i965_irq_preinstall; + dev->driver->irq_postinstall = i965_irq_postinstall; + dev->driver->irq_uninstall = i965_irq_uninstall; + dev->driver->irq_handler = i965_irq_handler; + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } + dev->driver->enable_vblank = i915_enable_vblank; + dev->driver->disable_vblank = i915_disable_vblank; + } +} + +void intel_hpd_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + int i; + + for (i = 1; 
i < HPD_NUM_PINS; i++) { + dev_priv->hpd_stats[i].hpd_cnt = 0; + dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; + } + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_connector *intel_connector = to_intel_connector(connector); + connector->polled = intel_connector->polled; + if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + connector->polled = DRM_CONNECTOR_POLL_HPD; + } + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); +} diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h new file mode 100644 index 0000000..b1a0cdb --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -0,0 +1,4835 @@ +/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _I915_REG_H_ +#define _I915_REG_H_ + +#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) + +#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) + +#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) +#define _MASKED_BIT_DISABLE(a) ((a) << 16) + +/* + * The Bridge device's PCI config space has information about the + * fb aperture size and the amount of pre-reserved memory. + * This is all handled in the intel-gtt.ko module. i915.ko only + * cares about the vga bit for the vga rbiter. 
+ */ +#define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ +#define SNB_GMCH_GGMS_MASK 0x3 +#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ +#define SNB_GMCH_GMS_MASK 0x1f + + +/* PCI config space */ + +#define HPLLCC 0xc0 /* 855 only */ +#define GC_CLOCK_CONTROL_MASK (0xf << 0) +#define GC_CLOCK_133_200 (0 << 0) +#define GC_CLOCK_100_200 (1 << 0) +#define GC_CLOCK_100_133 (2 << 0) +#define GC_CLOCK_166_250 (3 << 0) +#define GCFGC2 0xda +#define GCFGC 0xf0 /* 915+ only */ +#define GC_LOW_FREQUENCY_ENABLE (1 << 7) +#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) +#define GC_DISPLAY_CLOCK_MASK (7 << 4) +#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) +#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) +#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) +#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) +#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) +#define I965_GC_RENDER_CLOCK_MASK (0xf << 0) +#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) +#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) +#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) +#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) +#define I945_GC_RENDER_CLOCK_MASK (7 << 0) +#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) +#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) +#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) +#define I915_GC_RENDER_CLOCK_MASK (7 << 0) +#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) +#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) +#define LBB 0xf4 + +/* Graphics reset regs */ +#define I965_GDRST 0xc0 /* PCI config register */ +#define ILK_GDSR 0x2ca4 /* MCHBAR offset */ +#define GRDOM_FULL (0<<2) +#define GRDOM_RENDER (1<<2) +#define GRDOM_MEDIA (3<<2) +#define GRDOM_MASK (3<<2) +#define GRDOM_RESET_ENABLE (1<<0) + +#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ +#define GEN6_MBC_SNPCR_SHIFT 21 +#define GEN6_MBC_SNPCR_MASK (3<<21) +#define GEN6_MBC_SNPCR_MAX (0<<21) +#define GEN6_MBC_SNPCR_MED (1<<21) +#define GEN6_MBC_SNPCR_LOW (2<<21) +#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ + +#define GEN6_MBCTL 0x0907c +#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) +#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) +#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) +#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) +#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) + +#define GEN6_GDRST 0x941c +#define GEN6_GRDOM_FULL (1 << 0) +#define GEN6_GRDOM_RENDER (1 << 1) +#define GEN6_GRDOM_MEDIA (1 << 2) +#define GEN6_GRDOM_BLT (1 << 3) + +#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) +#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) +#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) +#define PP_DIR_DCLV_2G 0xffffffff + +#define GAM_ECOCHK 0x4090 +#define ECOCHK_SNB_BIT (1<<10) +#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) +#define ECOCHK_PPGTT_CACHE64B (0x3<<3) +#define ECOCHK_PPGTT_CACHE4B (0x0<<3) +#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4) +#define ECOCHK_PPGTT_LLC_IVB (0x1<<3) +#define ECOCHK_PPGTT_UC_HSW (0x1<<3) +#define ECOCHK_PPGTT_WT_HSW (0x2<<3) +#define ECOCHK_PPGTT_WB_HSW (0x3<<3) + +#define GAC_ECO_BITS 0x14090 +#define ECOBITS_SNB_BIT (1<<13) +#define ECOBITS_PPGTT_CACHE64B (3<<8) +#define ECOBITS_PPGTT_CACHE4B (0<<8) + +#define GAB_CTL 0x24000 +#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) + +/* VGA stuff */ + +#define VGA_ST01_MDA 0x3ba +#define 
VGA_ST01_CGA 0x3da + +#define VGA_MSR_WRITE 0x3c2 +#define VGA_MSR_READ 0x3cc +#define VGA_MSR_MEM_EN (1<<1) +#define VGA_MSR_CGA_MODE (1<<0) + +/* + * SR01 is the only VGA register touched on non-UMS setups. + * VLV doesn't do UMS, so the sequencer index/data registers + * are the only VGA registers which need to include + * display_mmio_offset. + */ +#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4) +#define SR01 1 +#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) + +#define VGA_AR_INDEX 0x3c0 +#define VGA_AR_VID_EN (1<<5) +#define VGA_AR_DATA_WRITE 0x3c0 +#define VGA_AR_DATA_READ 0x3c1 + +#define VGA_GR_INDEX 0x3ce +#define VGA_GR_DATA 0x3cf +/* GR05 */ +#define VGA_GR_MEM_READ_MODE_SHIFT 3 +#define VGA_GR_MEM_READ_MODE_PLANE 1 +/* GR06 */ +#define VGA_GR_MEM_MODE_MASK 0xc +#define VGA_GR_MEM_MODE_SHIFT 2 +#define VGA_GR_MEM_A0000_AFFFF 0 +#define VGA_GR_MEM_A0000_BFFFF 1 +#define VGA_GR_MEM_B0000_B7FFF 2 +#define VGA_GR_MEM_B0000_BFFFF 3 + +#define VGA_DACMASK 0x3c6 +#define VGA_DACRX 0x3c7 +#define VGA_DACWX 0x3c8 +#define VGA_DACDATA 0x3c9 + +#define VGA_CR_INDEX_MDA 0x3b4 +#define VGA_CR_DATA_MDA 0x3b5 +#define VGA_CR_INDEX_CGA 0x3d4 +#define VGA_CR_DATA_CGA 0x3d5 + +/* + * Memory interface instructions used by the kernel + */ +#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) + +#define MI_NOOP MI_INSTR(0, 0) +#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) +#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) +#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) +#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) +#define MI_FLUSH MI_INSTR(0x04, 0) +#define MI_READ_FLUSH (1 << 0) +#define MI_EXE_FLUSH (1 << 1) +#define MI_NO_WRITE_FLUSH (1 << 2) +#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ +#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ +#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) +#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) +#define MI_SUSPEND_FLUSH_EN (1<<0) +#define MI_REPORT_HEAD MI_INSTR(0x07, 0) +#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) +#define MI_OVERLAY_CONTINUE (0x0<<21) +#define MI_OVERLAY_ON (0x1<<21) +#define MI_OVERLAY_OFF (0x2<<21) +#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) +#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) +#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) +#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +/* IVB has funny definitions for which plane to flip. 
*/ +#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) +#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) +#define MI_ARB_ENABLE (1<<0) +#define MI_ARB_DISABLE (0<<0) + +#define MI_SET_CONTEXT MI_INSTR(0x18, 0) +#define MI_MM_SPACE_GTT (1<<8) +#define MI_MM_SPACE_PHYSICAL (0<<8) +#define MI_SAVE_EXT_STATE_EN (1<<3) +#define MI_RESTORE_EXT_STATE_EN (1<<2) +#define MI_FORCE_RESTORE (1<<1) +#define MI_RESTORE_INHIBIT (1<<0) +#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) +#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ +#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) +#define MI_STORE_DWORD_INDEX_SHIFT 2 +/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: + * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw + * simply ignores the register load under certain conditions. + * - One can actually load arbitrary many arbitrary registers: Simply issue x + * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! + */ +#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) +#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ +#define MI_FLUSH_DW_STORE_INDEX (1<<21) +#define MI_INVALIDATE_TLB (1<<18) +#define MI_FLUSH_DW_OP_STOREDW (1<<14) +#define MI_INVALIDATE_BSD (1<<7) +#define MI_FLUSH_DW_USE_GTT (1<<2) +#define MI_FLUSH_DW_USE_PPGTT (0<<2) +#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) +#define MI_BATCH_NON_SECURE (1) +/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ +#define MI_BATCH_NON_SECURE_I965 (1<<8) +#define MI_BATCH_PPGTT_HSW (1<<8) +#define MI_BATCH_NON_SECURE_HSW (1<<13) +#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) +#define MI_SEMAPHORE_SYNC_RV (2<<16) +#define MI_SEMAPHORE_SYNC_RB (0<<16) +#define MI_SEMAPHORE_SYNC_VR (0<<16) +#define MI_SEMAPHORE_SYNC_VB (2<<16) +#define MI_SEMAPHORE_SYNC_BR (2<<16) +#define MI_SEMAPHORE_SYNC_BV (0<<16) +#define MI_SEMAPHORE_SYNC_INVALID (1<<0) +/* + * 3D instructions used by the kernel + */ +#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) + +#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) +#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) +#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define 
GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) +#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) +#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) +#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) +#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) +#define BLT_DEPTH_8 (0<<24) +#define BLT_DEPTH_16_565 (1<<24) +#define BLT_DEPTH_16_1555 (2<<24) +#define BLT_DEPTH_32 (3<<24) +#define BLT_ROP_GXCOPY (0xcc<<16) +#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ +#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) +#define DISPLAY_PLANE_A (0<<20) +#define DISPLAY_PLANE_B (1<<20) +#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) +#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ +#define PIPE_CONTROL_CS_STALL (1<<20) +#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_QW_WRITE (1<<14) +#define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_WRITE_FLUSH (1<<12) +#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ +#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ +#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) +#define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) +#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) +#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) +#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) +#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ + + +/* + * Reset registers + */ +#define DEBUG_RESET_I830 0x6070 +#define DEBUG_RESET_FULL (1<<7) +#define DEBUG_RESET_RENDER (1<<8) +#define DEBUG_RESET_DISPLAY (1<<9) + +/* + * DPIO - a special bus for various display related registers to hide behind: + * 0x800c: m1, m2, n, p1, p2, k dividers + * 0x8014: REF and SFR select + * 0x8014: N divider, VCO select + * 0x801c/3c: core clock bits + * 0x8048/68: low pass filter coefficients + * 0x8100: fast clock controls + * + * DPIO is VLV only. 
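+ *
+ * Accesses go through the DPIO_PKT/DPIO_REG/DPIO_DATA mailbox defined below;
+ * a read looks roughly like this (a sketch only, assuming the I915_READ and
+ * I915_WRITE mmio helpers from i915_drv.h):
+ *
+ *   I915_WRITE(DPIO_REG, reg);
+ *   I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | DPIO_BYTE);
+ *   ... poll DPIO_PKT until DPIO_BUSY clears ...
+ *   val = I915_READ(DPIO_DATA);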
+ */ +#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) +#define DPIO_RID (0<<24) +#define DPIO_OP_WRITE (1<<16) +#define DPIO_OP_READ (0<<16) +#define DPIO_PORTID (0x12<<8) +#define DPIO_BYTE (0xf<<4) +#define DPIO_BUSY (1<<0) /* status only */ +#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104) +#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108) +#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) +#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ +#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ +#define DPIO_SFR_BYPASS (1<<1) +#define DPIO_RESET (1<<0) + +#define _DPIO_DIV_A 0x800c +#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */ +#define DPIO_K_SHIFT (24) /* 4 bits */ +#define DPIO_P1_SHIFT (21) /* 3 bits */ +#define DPIO_P2_SHIFT (16) /* 5 bits */ +#define DPIO_N_SHIFT (12) /* 4 bits */ +#define DPIO_ENABLE_CALIBRATION (1<<11) +#define DPIO_M1DIV_SHIFT (8) /* 3 bits */ +#define DPIO_M2DIV_MASK 0xff +#define _DPIO_DIV_B 0x802c +#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B) + +#define _DPIO_REFSFR_A 0x8014 +#define DPIO_REFSEL_OVERRIDE 27 +#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ +#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ +#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ +#define DPIO_PLL_REFCLK_SEL_MASK 3 +#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ +#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ +#define _DPIO_REFSFR_B 0x8034 +#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B) + +#define _DPIO_CORE_CLK_A 0x801c +#define _DPIO_CORE_CLK_B 0x803c +#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B) + +#define _DPIO_LFP_COEFF_A 0x8048 +#define _DPIO_LFP_COEFF_B 0x8068 +#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) + +#define DPIO_FASTCLK_DISABLE 0x8100 + +#define DPIO_DATA_CHANNEL1 0x8220 +#define DPIO_DATA_CHANNEL2 0x8420 + +/* + * Fence registers + */ +#define FENCE_REG_830_0 0x2000 +#define FENCE_REG_945_8 0x3000 +#define I830_FENCE_START_MASK 0x07f80000 +#define I830_FENCE_TILING_Y_SHIFT 12 +#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) +#define I830_FENCE_PITCH_SHIFT 4 +#define I830_FENCE_REG_VALID (1<<0) +#define I915_FENCE_MAX_PITCH_VAL 4 +#define I830_FENCE_MAX_PITCH_VAL 6 +#define I830_FENCE_MAX_SIZE_VAL (1<<8) + +#define I915_FENCE_START_MASK 0x0ff00000 +#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) + +#define FENCE_REG_965_0 0x03000 +#define I965_FENCE_PITCH_SHIFT 2 +#define I965_FENCE_TILING_Y_SHIFT 1 +#define I965_FENCE_REG_VALID (1<<0) +#define I965_FENCE_MAX_PITCH_VAL 0x0400 + +#define FENCE_REG_SANDYBRIDGE_0 0x100000 +#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 +#define GEN7_FENCE_MAX_PITCH_VAL 0x0800 + +/* control register for cpu gtt access */ +#define TILECTL 0x101000 +#define TILECTL_SWZCTL (1 << 0) +#define TILECTL_TLB_PREFETCH_DIS (1 << 2) +#define TILECTL_BACKSNOOP_DIS (1 << 3) + +/* + * Instruction and interrupt control regs + */ +#define PGTBL_ER 0x02024 +#define RENDER_RING_BASE 0x02000 +#define BSD_RING_BASE 0x04000 +#define GEN6_BSD_RING_BASE 0x12000 +#define BLT_RING_BASE 0x22000 +#define RING_TAIL(base) ((base)+0x30) +#define RING_HEAD(base) ((base)+0x34) +#define RING_START(base) ((base)+0x38) +#define RING_CTL(base) ((base)+0x3c) +#define RING_SYNC_0(base) ((base)+0x40) +#define RING_SYNC_1(base) ((base)+0x44) +#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) +#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) +#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) +#define GEN6_VBSYNC 
(RING_SYNC_0(GEN6_BSD_RING_BASE)) +#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) +#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) +#define RING_MAX_IDLE(base) ((base)+0x54) +#define RING_HWS_PGA(base) ((base)+0x80) +#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) +#define ARB_MODE 0x04030 +#define ARB_MODE_SWIZZLE_SNB (1<<4) +#define ARB_MODE_SWIZZLE_IVB (1<<5) +#define RENDER_HWS_PGA_GEN7 (0x04080) +#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) +#define DONE_REG 0x40b0 +#define BSD_HWS_PGA_GEN7 (0x04180) +#define BLT_HWS_PGA_GEN7 (0x04280) +#define RING_ACTHD(base) ((base)+0x74) +#define RING_NOPID(base) ((base)+0x94) +#define RING_IMR(base) ((base)+0xa8) +#define RING_TIMESTAMP(base) ((base)+0x358) +#define TAIL_ADDR 0x001FFFF8 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 +#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ +#if 0 +#define PRB0_TAIL 0x02030 +#define PRB0_HEAD 0x02034 +#define PRB0_START 0x02038 +#define PRB0_CTL 0x0203c +#define PRB1_TAIL 0x02040 /* 915+ only */ +#define PRB1_HEAD 0x02044 /* 915+ only */ +#define PRB1_START 0x02048 /* 915+ only */ +#define PRB1_CTL 0x0204c /* 915+ only */ +#endif +#define IPEIR_I965 0x02064 +#define IPEHR_I965 0x02068 +#define INSTDONE_I965 0x0206c +#define GEN7_INSTDONE_1 0x0206c +#define GEN7_SC_INSTDONE 0x07100 +#define GEN7_SAMPLER_INSTDONE 0x0e160 +#define GEN7_ROW_INSTDONE 0x0e164 +#define I915_NUM_INSTDONE_REG 4 +#define RING_IPEIR(base) ((base)+0x64) +#define RING_IPEHR(base) ((base)+0x68) +#define RING_INSTDONE(base) ((base)+0x6c) +#define RING_INSTPS(base) ((base)+0x70) +#define RING_DMA_FADD(base) ((base)+0x78) +#define RING_INSTPM(base) ((base)+0xc0) +#define INSTPS 0x02070 /* 965+ only */ +#define INSTDONE1 0x0207c /* 965+ only */ +#define ACTHD_I965 0x02074 +#define HWS_PGA 0x02080 +#define HWS_ADDRESS_MASK 0xfffff000 +#define HWS_START_ADDRESS_SHIFT 4 +#define PWRCTXA 0x2088 /* 965GM+ only */ +#define PWRCTX_EN (1<<0) +#define IPEIR 0x02088 +#define IPEHR 0x0208c +#define INSTDONE 0x02090 +#define NOPID 0x02094 +#define HWSTAM 0x02098 +#define DMA_FADD_I8XX 0x020d0 + +#define ERROR_GEN6 0x040a0 +#define GEN7_ERR_INT 0x44040 +#define ERR_INT_MMIO_UNCLAIMED (1<<13) + +#define FPGA_DBG 0x42300 +#define FPGA_DBG_RM_NOCLAIM (1<<31) + +#define DERRMR 0x44050 + +/* GM45+ chicken bits -- debug workaround bits that may be required + * for various sorts of correct behavior. The top 16 bits of each are + * the enables for writing to the corresponding low bit. + */ +#define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) +#define _3D_CHICKEN2 0x0208c +/* Disables pipelining of read flushes past the SF-WIZ interface. + * Required on all Ironlake steppings according to the B-Spec, but the + * particular danger of not doing so is not specified. 
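+ *
+ * Like the other chicken bits, the write enable sits in the top half of the
+ * dword, so enabling it looks like (a sketch only, assuming the I915_WRITE
+ * mmio helper from i915_drv.h):
+ *
+ *   I915_WRITE(_3D_CHICKEN2,
+ *              _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ *              _3D_CHICKEN2_WM_READ_PIPELINED);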
+ */ +# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) +#define _3D_CHICKEN3 0x02090 +#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) +#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) + +#define MI_MODE 0x0209c +# define VS_TIMER_DISPATCH (1 << 6) +# define MI_FLUSH_ENABLE (1 << 12) +# define ASYNC_FLIP_PERF_DISABLE (1 << 14) + +#define GEN6_GT_MODE 0x20d0 +#define GEN6_GT_MODE_HI (1 << 9) +#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) + +#define GFX_MODE 0x02520 +#define GFX_MODE_GEN7 0x0229c +#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c) +#define GFX_RUN_LIST_ENABLE (1<<15) +#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) +#define GFX_SURFACE_FAULT_ENABLE (1<<12) +#define GFX_REPLAY_MODE (1<<11) +#define GFX_PSMI_GRANULARITY (1<<10) +#define GFX_PPGTT_ENABLE (1<<9) + +#define VLV_DISPLAY_BASE 0x180000 + +#define SCPD0 0x0209c /* 915+ only */ +#define IER 0x020a0 +#define IIR 0x020a4 +#define IMR 0x020a8 +#define ISR 0x020ac +#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) +#define GCFG_DIS (1<<8) +#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) +#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) +#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) +#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) +#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) +#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) +#define I915_DISPLAY_PORT_INTERRUPT (1<<17) +#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) +#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ +#define I915_HWB_OOM_INTERRUPT (1<<13) +#define I915_SYNC_STATUS_INTERRUPT (1<<12) +#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) +#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) +#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) +#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) +#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) +#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) +#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) +#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) +#define I915_DEBUG_INTERRUPT (1<<2) +#define I915_USER_INTERRUPT (1<<1) +#define I915_ASLE_INTERRUPT (1<<0) +#define I915_BSD_USER_INTERRUPT (1<<25) +#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ +#define EIR 0x020b0 +#define EMR 0x020b4 +#define ESR 0x020b8 +#define GM45_ERROR_PAGE_TABLE (1<<5) +#define GM45_ERROR_MEM_PRIV (1<<4) +#define I915_ERROR_PAGE_TABLE (1<<4) +#define GM45_ERROR_CP_PRIV (1<<3) +#define I915_ERROR_MEMORY_REFRESH (1<<1) +#define I915_ERROR_INSTRUCTION (1<<0) +#define INSTPM 0x020c0 +#define INSTPM_SELF_EN (1<<12) /* 915GM only */ +#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts + will not assert AGPBUSY# and will only + be delivered when out of C3. */ +#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ +#define INSTPM_TLB_INVALIDATE (1<<9) +#define INSTPM_SYNC_FLUSH (1<<5) +#define ACTHD 0x020c8 +#define FW_BLC 0x020d8 +#define FW_BLC2 0x020dc +#define FW_BLC_SELF 0x020e0 /* 915+ only */ +#define FW_BLC_SELF_EN_MASK (1<<31) +#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ +#define FW_BLC_SELF_EN (1<<15) /* 945 only */ +#define MM_BURST_LENGTH 0x00700000 +#define MM_FIFO_WATERMARK 0x0001F000 +#define LM_BURST_LENGTH 0x00000700 +#define LM_FIFO_WATERMARK 0x0000001F +#define MI_ARB_STATE 0x020e4 /* 915+ only */ + +/* Make render/texture TLB fetches lower priorty than associated data + * fetches. 
This is not turned on by default + */ +#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15) + +/* Isoch request wait on GTT enable (Display A/B/C streams). + * Make isoch requests stall on the TLB update. May cause + * display underruns (test mode only) + */ +#define MI_ARB_ISOCH_WAIT_GTT (1 << 14) + +/* Block grant count for isoch requests when block count is + * set to a finite value. + */ +#define MI_ARB_BLOCK_GRANT_MASK (3 << 12) +#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */ +#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */ +#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */ +#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */ + +/* Enable render writes to complete in C2/C3/C4 power states. + * If this isn't enabled, render writes are prevented in low + * power states. That seems bad to me. + */ +#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11) + +/* This acknowledges an async flip immediately instead + * of waiting for 2TLB fetches. + */ +#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10) + +/* Enables non-sequential data reads through arbiter + */ +#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9) + +/* Disable FSB snooping of cacheable write cycles from binner/render + * command stream + */ +#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8) + +/* Arbiter time slice for non-isoch streams */ +#define MI_ARB_TIME_SLICE_MASK (7 << 5) +#define MI_ARB_TIME_SLICE_1 (0 << 5) +#define MI_ARB_TIME_SLICE_2 (1 << 5) +#define MI_ARB_TIME_SLICE_4 (2 << 5) +#define MI_ARB_TIME_SLICE_6 (3 << 5) +#define MI_ARB_TIME_SLICE_8 (4 << 5) +#define MI_ARB_TIME_SLICE_10 (5 << 5) +#define MI_ARB_TIME_SLICE_14 (6 << 5) +#define MI_ARB_TIME_SLICE_16 (7 << 5) + +/* Low priority grace period page size */ +#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */ +#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4) + +/* Disable display A/B trickle feed */ +#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) + +/* Set display plane priority */ +#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */ +#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ + +#define CACHE_MODE_0 0x02120 /* 915+ only */ +#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) +#define CM0_IZ_OPT_DISABLE (1<<6) +#define CM0_ZR_OPT_DISABLE (1<<5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) +#define CM0_DEPTH_EVICT_DISABLE (1<<4) +#define CM0_COLOR_EVICT_DISABLE (1<<3) +#define CM0_DEPTH_WRITE_DISABLE (1<<1) +#define CM0_RC_OP_FLUSH_DISABLE (1<<0) +#define BB_ADDR 0x02140 /* 8 bytes */ +#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ +#define GFX_FLSH_CNTL_GEN6 0x101008 +#define GFX_FLSH_CNTL_EN (1<<0) +#define ECOSKPD 0x021d0 +#define ECO_GATING_CX_ONLY (1<<3) +#define ECO_FLIP_DONE (1<<0) + +#define CACHE_MODE_1 0x7004 /* IVB+ */ +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) + +/* GEN6 interrupt control + * Note that the per-ring interrupt bits do alias with the global interrupt bits + * in GTIMR. 
*/ +#define GEN6_RENDER_HWSTAM 0x2098 +#define GEN6_RENDER_IMR 0x20a8 +#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) +#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) +#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) +#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) +#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) +#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) +#define GEN6_RENDER_SYNC_STATUS (1 << 2) +#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) +#define GEN6_RENDER_USER_INTERRUPT (1 << 0) + +#define GEN6_BLITTER_HWSTAM 0x22098 +#define GEN6_BLITTER_IMR 0x220a8 +#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) +#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) +#define GEN6_BLITTER_SYNC_STATUS (1 << 24) +#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) + +#define GEN6_BLITTER_ECOSKPD 0x221d0 +#define GEN6_BLITTER_LOCK_SHIFT 16 +#define GEN6_BLITTER_FBC_NOTIFY (1<<3) + +#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 +#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) +#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) +#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) +#define GEN6_BSD_GO_INDICATOR (1 << 4) + +#define GEN6_BSD_HWSTAM 0x12098 +#define GEN6_BSD_IMR 0x120a8 +#define GEN6_BSD_USER_INTERRUPT (1 << 12) + +#define GEN6_BSD_RNCID 0x12198 + +#define GEN7_FF_THREAD_MODE 0x20a0 +#define GEN7_FF_SCHED_MASK 0x0077070 +#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) +#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) +#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) +#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ +#define GEN7_FF_VS_REF_CNT_FFME (1 << 15) +#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) +#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) +#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ +#define GEN7_FF_VS_SCHED_HW (0x0<<12) +#define GEN7_FF_DS_SCHED_HS1 (0x5<<4) +#define GEN7_FF_DS_SCHED_HS0 (0x3<<4) +#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ +#define GEN7_FF_DS_SCHED_HW (0x0<<4) + +/* + * Framebuffer compression (915+ only) + */ + +#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ +#define FBC_LL_BASE 0x03204 /* 4k page aligned */ +#define FBC_CONTROL 0x03208 +#define FBC_CTL_EN (1<<31) +#define FBC_CTL_PERIODIC (1<<30) +#define FBC_CTL_INTERVAL_SHIFT (16) +#define FBC_CTL_UNCOMPRESSIBLE (1<<14) +#define FBC_CTL_C3_IDLE (1<<13) +#define FBC_CTL_STRIDE_SHIFT (5) +#define FBC_CTL_FENCENO (1<<0) +#define FBC_COMMAND 0x0320c +#define FBC_CMD_COMPRESS (1<<0) +#define FBC_STATUS 0x03210 +#define FBC_STAT_COMPRESSING (1<<31) +#define FBC_STAT_COMPRESSED (1<<30) +#define FBC_STAT_MODIFIED (1<<29) +#define FBC_STAT_CURRENT_LINE (1<<0) +#define FBC_CONTROL2 0x03214 +#define FBC_CTL_FENCE_DBL (0<<4) +#define FBC_CTL_IDLE_IMM (0<<2) +#define FBC_CTL_IDLE_FULL (1<<2) +#define FBC_CTL_IDLE_LINE (2<<2) +#define FBC_CTL_IDLE_DEBUG (3<<2) +#define FBC_CTL_CPU_FENCE (1<<1) +#define FBC_CTL_PLANEA (0<<0) +#define FBC_CTL_PLANEB (1<<0) +#define FBC_FENCE_OFF 0x0321b +#define FBC_TAG 0x03300 + +#define FBC_LL_SIZE (1536) + +/* Framebuffer compression for GM45+ */ +#define DPFC_CB_BASE 0x3200 +#define DPFC_CONTROL 0x3208 +#define DPFC_CTL_EN (1<<31) +#define DPFC_CTL_PLANEA (0<<30) +#define DPFC_CTL_PLANEB (1<<30) +#define DPFC_CTL_FENCE_EN (1<<29) +#define DPFC_CTL_PERSISTENT_MODE (1<<25) +#define DPFC_SR_EN (1<<10) +#define DPFC_CTL_LIMIT_1X (0<<6) +#define DPFC_CTL_LIMIT_2X (1<<6) +#define DPFC_CTL_LIMIT_4X (2<<6) +#define DPFC_RECOMP_CTL 0x320c +#define DPFC_RECOMP_STALL_EN (1<<27) +#define DPFC_RECOMP_STALL_WM_SHIFT (16) +#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) 
+#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) +#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) +#define DPFC_STATUS 0x3210 +#define DPFC_INVAL_SEG_SHIFT (16) +#define DPFC_INVAL_SEG_MASK (0x07ff0000) +#define DPFC_COMP_SEG_SHIFT (0) +#define DPFC_COMP_SEG_MASK (0x000003ff) +#define DPFC_STATUS2 0x3214 +#define DPFC_FENCE_YOFF 0x3218 +#define DPFC_CHICKEN 0x3224 +#define DPFC_HT_MODIFY (1<<31) + +/* Framebuffer compression for Ironlake */ +#define ILK_DPFC_CB_BASE 0x43200 +#define ILK_DPFC_CONTROL 0x43208 +/* The bit 28-8 is reserved */ +#define DPFC_RESERVED (0x1FFFFF00) +#define ILK_DPFC_RECOMP_CTL 0x4320c +#define ILK_DPFC_STATUS 0x43210 +#define ILK_DPFC_FENCE_YOFF 0x43218 +#define ILK_DPFC_CHICKEN 0x43224 +#define ILK_FBC_RT_BASE 0x2128 +#define ILK_FBC_RT_VALID (1<<0) + +#define ILK_DISPLAY_CHICKEN1 0x42000 +#define ILK_FBCQ_DIS (1<<22) +#define ILK_PABSTRETCH_DIS (1<<21) + + +/* + * Framebuffer compression for Sandybridge + * + * The following two registers are of type GTTMMADR + */ +#define SNB_DPFC_CTL_SA 0x100100 +#define SNB_CPU_FENCE_ENABLE (1<<29) +#define DPFC_CPU_FENCE_OFFSET 0x100104 + + +/* + * GPIO regs + */ +#define GPIOA 0x5010 +#define GPIOB 0x5014 +#define GPIOC 0x5018 +#define GPIOD 0x501c +#define GPIOE 0x5020 +#define GPIOF 0x5024 +#define GPIOG 0x5028 +#define GPIOH 0x502c +# define GPIO_CLOCK_DIR_MASK (1 << 0) +# define GPIO_CLOCK_DIR_IN (0 << 1) +# define GPIO_CLOCK_DIR_OUT (1 << 1) +# define GPIO_CLOCK_VAL_MASK (1 << 2) +# define GPIO_CLOCK_VAL_OUT (1 << 3) +# define GPIO_CLOCK_VAL_IN (1 << 4) +# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) +# define GPIO_DATA_DIR_MASK (1 << 8) +# define GPIO_DATA_DIR_IN (0 << 9) +# define GPIO_DATA_DIR_OUT (1 << 9) +# define GPIO_DATA_VAL_MASK (1 << 10) +# define GPIO_DATA_VAL_OUT (1 << 11) +# define GPIO_DATA_VAL_IN (1 << 12) +# define GPIO_DATA_PULLUP_DISABLE (1 << 13) + +#define GMBUS0 0x5100 /* clock/port select */ +#define GMBUS_RATE_100KHZ (0<<8) +#define GMBUS_RATE_50KHZ (1<<8) +#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_PORT_DISABLED 0 +#define GMBUS_PORT_SSC 1 +#define GMBUS_PORT_VGADDC 2 +#define GMBUS_PORT_PANEL 3 +#define GMBUS_PORT_DPC 4 /* HDMIC */ +#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ +#define GMBUS_PORT_DPD 6 /* HDMID */ +#define GMBUS_PORT_RESERVED 7 /* 7 reserved */ +#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1) +#define GMBUS1 0x5104 /* command/status */ +#define GMBUS_SW_CLR_INT (1<<31) +#define GMBUS_SW_RDY (1<<30) +#define GMBUS_ENT (1<<29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0<<25) +#define GMBUS_CYCLE_WAIT (1<<25) +#define GMBUS_CYCLE_INDEX (2<<25) +#define GMBUS_CYCLE_STOP (4<<25) +#define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_SLAVE_INDEX_SHIFT 8 +#define GMBUS_SLAVE_ADDR_SHIFT 1 +#define GMBUS_SLAVE_READ (1<<0) +#define GMBUS_SLAVE_WRITE (0<<0) +#define GMBUS2 0x5108 /* status */ +#define GMBUS_INUSE (1<<15) +#define GMBUS_HW_WAIT_PHASE (1<<14) +#define GMBUS_STALL_TIMEOUT (1<<13) +#define GMBUS_INT (1<<12) +#define GMBUS_HW_RDY (1<<11) +#define GMBUS_SATOER (1<<10) +#define GMBUS_ACTIVE (1<<9) +#define GMBUS3 0x510c /* data buffer bytes 3-0 */ +#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ +#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) +#define GMBUS_NAK_EN (1<<3) +#define GMBUS_IDLE_EN (1<<2) +#define GMBUS_HW_WAIT_EN (1<<1) +#define GMBUS_HW_RDY_EN (1<<0) +#define GMBUS5 0x5120 /* byte index */ +#define 
GMBUS_2BYTE_INDEX_EN (1<<31) + +/* + * Clock control & power management + */ + +#define VGA0 0x6000 +#define VGA1 0x6004 +#define VGA_PD 0x6010 +#define VGA0_PD_P2_DIV_4 (1 << 7) +#define VGA0_PD_P1_DIV_2 (1 << 5) +#define VGA0_PD_P1_SHIFT 0 +#define VGA0_PD_P1_MASK (0x1f << 0) +#define VGA1_PD_P2_DIV_4 (1 << 15) +#define VGA1_PD_P1_DIV_2 (1 << 13) +#define VGA1_PD_P1_SHIFT 8 +#define VGA1_PD_P1_MASK (0x1f << 8) +#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) +#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) +#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) +#define DPLL_VCO_ENABLE (1 << 31) +#define DPLL_DVO_HIGH_SPEED (1 << 30) +#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) +#define DPLL_SYNCLOCK_ENABLE (1 << 29) +#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) +#define DPLL_VGA_MODE_DIS (1 << 28) +#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ +#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ +#define DPLL_MODE_MASK (3 << 26) +#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ +#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ +#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ +#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ +#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ +#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ +#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ +#define DPLL_LOCK_VLV (1<<15) +#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) + +#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 +/* + * The i830 generation, in LVDS mode, defines P1 as the bit number set within + * this field (only one bit may be set). + */ +#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 +#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15 +/* i830, required in DVO non-gang */ +#define PLL_P2_DIVIDE_BY_4 (1 << 23) +#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ +#define PLL_REF_INPUT_DREFCLK (0 << 13) +#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ +#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ +#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) +#define PLL_REF_INPUT_MASK (3 << 13) +#define PLL_LOAD_PULSE_PHASE_SHIFT 9 +/* Ironlake */ +# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 +# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) +# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) +# define DPLL_FPA1_P1_POST_DIV_SHIFT 0 +# define DPLL_FPA1_P1_POST_DIV_MASK 0xff + +/* + * Parallel to Serial Load Pulse phase selection. + * Selects the phase for the 10X DPLL clock for the PCIe + * digital display port. The range is 4 to 13; 10 or more + * is just a flip delay. The default is 6 + */ +#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) +#define DISPLAY_RATE_SELECT_FPA1 (1 << 8) +/* + * SDVO multiplier for 945G/GM. Not used on 965. + */ +#define SDVO_MULTIPLIER_MASK 0x000000ff +#define SDVO_MULTIPLIER_SHIFT_HIRES 4 +#define SDVO_MULTIPLIER_SHIFT_VGA 0 +#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ +/* + * UDI pixel divider, controlling how many pixels are stuffed into a packet. + * + * Value is pixels minus 1. Must be set to 1 pixel for SDVO. + */ +#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 +#define DPLL_MD_UDI_DIVIDER_SHIFT 24 +/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ +#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 +#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 +/* + * SDVO/UDI pixel multiplier. 
+ * + * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus + * clock rate is 10 times the DPLL clock. At low resolution/refresh rate + * modes, the bus rate would be below the limits, so SDVO allows for stuffing + * dummy bytes in the datastream at an increased clock rate, with both sides of + * the link knowing how many bytes are fill. + * + * So, for a mode with a dotclock of 65Mhz, we would want to double the clock + * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be + * set to 130Mhz, and the SDVO multiplier set to 2x in this register and + * through an SDVO command. + * + * This register field has values of multiplication factor minus 1, with + * a maximum multiplier of 5 for SDVO. + */ +#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 +#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 +/* + * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. + * This best be set to the default value (3) or the CRT won't work. No, + * I don't entirely understand what this does... + */ +#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f +#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ +#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) + +#define _FPA0 0x06040 +#define _FPA1 0x06044 +#define _FPB0 0x06048 +#define _FPB1 0x0604c +#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) +#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) +#define FP_N_DIV_MASK 0x003f0000 +#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 +#define FP_N_DIV_SHIFT 16 +#define FP_M1_DIV_MASK 0x00003f00 +#define FP_M1_DIV_SHIFT 8 +#define FP_M2_DIV_MASK 0x0000003f +#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff +#define FP_M2_DIV_SHIFT 0 +#define DPLL_TEST 0x606c +#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) +#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) +#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) +#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) +#define DPLLB_TEST_N_BYPASS (1 << 19) +#define DPLLB_TEST_M_BYPASS (1 << 18) +#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) +#define DPLLA_TEST_N_BYPASS (1 << 3) +#define DPLLA_TEST_M_BYPASS (1 << 2) +#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) +#define D_STATE 0x6104 +#define DSTATE_GFX_RESET_I830 (1<<6) +#define DSTATE_PLL_D3_OFF (1<<3) +#define DSTATE_GFX_CLOCK_GATING (1<<1) +#define DSTATE_DOT_CLOCK_GATING (1<<0) +#define DSPCLK_GATE_D 0x6200 +# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ +# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ +# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ +# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */ +# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */ +# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */ +# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */ +# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */ +# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */ +# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */ +# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */ +# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */ +# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */ +# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */ +# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */ +# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */ +# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */ +# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */ +# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */ +# define DPOUNIT_CLOCK_GATE_DISABLE (1 
<< 11) +# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10) +# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9) +# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8) +# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */ +# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */ +# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */ +# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5) +# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4) +/** + * This bit must be set on the 830 to prevent hangs when turning off the + * overlay scaler. + */ +# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3) +# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2) +# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1) +# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */ +# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */ + +#define RENCLK_GATE_D1 0x6204 +# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */ +# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */ +# define PC_FE_CLOCK_GATE_DISABLE (1 << 11) +# define PC_BE_CLOCK_GATE_DISABLE (1 << 10) +# define WINDOWER_CLOCK_GATE_DISABLE (1 << 9) +# define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8) +# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7) +# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6) +# define MAG_CLOCK_GATE_DISABLE (1 << 5) +/** This bit must be unset on 855,865 */ +# define MECI_CLOCK_GATE_DISABLE (1 << 4) +# define DCMP_CLOCK_GATE_DISABLE (1 << 3) +# define MEC_CLOCK_GATE_DISABLE (1 << 2) +# define MECO_CLOCK_GATE_DISABLE (1 << 1) +/** This bit must be set on 855,865. */ +# define SV_CLOCK_GATE_DISABLE (1 << 0) +# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16) +# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15) +# define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14) +# define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13) +# define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12) +# define I915_WM_CLOCK_GATE_DISABLE (1 << 11) +# define I915_IZ_CLOCK_GATE_DISABLE (1 << 10) +# define I915_PI_CLOCK_GATE_DISABLE (1 << 9) +# define I915_DI_CLOCK_GATE_DISABLE (1 << 8) +# define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7) +# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6) +# define I915_SC_CLOCK_GATE_DISABLE (1 << 5) +# define I915_FL_CLOCK_GATE_DISABLE (1 << 4) +# define I915_DM_CLOCK_GATE_DISABLE (1 << 3) +# define I915_PS_CLOCK_GATE_DISABLE (1 << 2) +# define I915_CC_CLOCK_GATE_DISABLE (1 << 1) +# define I915_BY_CLOCK_GATE_DISABLE (1 << 0) + +# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30) +/** This bit must always be set on 965G/965GM */ +# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29) +# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28) +# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27) +# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26) +# define I965_GW_CLOCK_GATE_DISABLE (1 << 25) +# define I965_TD_CLOCK_GATE_DISABLE (1 << 24) +/** This bit must always be set on 965G */ +# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23) +# define I965_IC_CLOCK_GATE_DISABLE (1 << 22) +# define I965_EU_CLOCK_GATE_DISABLE (1 << 21) +# define I965_IF_CLOCK_GATE_DISABLE (1 << 20) +# define I965_TC_CLOCK_GATE_DISABLE (1 << 19) +# define I965_SO_CLOCK_GATE_DISABLE (1 << 17) +# define I965_FBC_CLOCK_GATE_DISABLE (1 << 16) +# define I965_MARI_CLOCK_GATE_DISABLE (1 << 15) +# define I965_MASF_CLOCK_GATE_DISABLE (1 << 14) +# define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13) +# define I965_EM_CLOCK_GATE_DISABLE (1 << 12) +# define I965_UC_CLOCK_GATE_DISABLE (1 << 11) +# define I965_SI_CLOCK_GATE_DISABLE (1 << 6) +# define I965_MT_CLOCK_GATE_DISABLE (1 << 5) +# define I965_PL_CLOCK_GATE_DISABLE (1 << 4) +# define 
I965_DG_CLOCK_GATE_DISABLE (1 << 3) +# define I965_QC_CLOCK_GATE_DISABLE (1 << 2) +# define I965_FT_CLOCK_GATE_DISABLE (1 << 1) +# define I965_DM_CLOCK_GATE_DISABLE (1 << 0) + +#define RENCLK_GATE_D2 0x6208 +#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9) +#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7) +#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6) +#define RAMCLK_GATE_D 0x6210 /* CRL only */ +#define DEUC 0x6214 /* CRL only */ + +#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) +#define FW_CSPWRDWNEN (1<<15) + +/* + * Palette regs + */ + +#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) +#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) +#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) + +/* MCH MMIO space */ + +/* + * MCHBAR mirror. + * + * This mirrors the MCHBAR MMIO space whose location is determined by + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in + * every way. It is not accessible from the CP register read instructions. + * + */ +#define MCHBAR_MIRROR_BASE 0x10000 + +#define MCHBAR_MIRROR_BASE_SNB 0x140000 + +/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ +#define DCLK 0x5e04 + +/** 915-945 and GM965 MCH register controlling DRAM channel access */ +#define DCC 0x10200 +#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) +#define DCC_ADDRESSING_MODE_MASK (3 << 0) +#define DCC_CHANNEL_XOR_DISABLE (1 << 10) +#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) + +/** Pineview MCH register contains DDR3 setting */ +#define CSHRDDR3CTL 0x101a8 +#define CSHRDDR3CTL_DDR3 (1 << 2) + +/** 965 MCH register controlling DRAM channel configuration */ +#define C0DRB3 0x10206 +#define C1DRB3 0x10606 + +/** snb MCH registers for reading the DRAM channel configuration */ +#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004) +#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008) +#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C) +#define MAD_DIMM_ECC_MASK (0x3 << 24) +#define MAD_DIMM_ECC_OFF (0x0 << 24) +#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) +#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) +#define MAD_DIMM_ECC_ON (0x3 << 24) +#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) +#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) +#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ +#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ +#define MAD_DIMM_B_DUAL_RANK (0x1 << 18) +#define MAD_DIMM_A_DUAL_RANK (0x1 << 17) +#define MAD_DIMM_A_SELECT (0x1 << 16) +/* DIMM sizes are in multiples of 256mb. 
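+ * For example, decoding DIMM A of channel 0 with the masks below (a sketch
+ * only, assuming the I915_READ mmio helper from i915_drv.h):
+ *
+ *   u32 val = I915_READ(MAD_DIMM_C0);
+ *   int dimm_a_mb = ((val & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT) * 256;
+ *
+ * so a raw field value of 4 corresponds to a 1 GB DIMM.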
*/ +#define MAD_DIMM_B_SIZE_SHIFT 8 +#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) +#define MAD_DIMM_A_SIZE_SHIFT 0 +#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) + +/** snb MCH registers for priority tuning */ +#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) +#define MCH_SSKPD_WM0_MASK 0x3f +#define MCH_SSKPD_WM0_VAL 0xc + +/* Clocking configuration register */ +#define CLKCFG 0x10c00 +#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ +#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ +#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ +#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ +#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ +#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ +/* Note, below two are guess */ +#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_MASK (7 << 0) +#define CLKCFG_MEM_533 (1 << 4) +#define CLKCFG_MEM_667 (2 << 4) +#define CLKCFG_MEM_800 (3 << 4) +#define CLKCFG_MEM_MASK (7 << 4) + +#define TSC1 0x11001 +#define TSE (1<<0) +#define TR1 0x11006 +#define TSFS 0x11020 +#define TSFS_SLOPE_MASK 0x0000ff00 +#define TSFS_SLOPE_SHIFT 8 +#define TSFS_INTR_MASK 0x000000ff + +#define CRSTANDVID 0x11100 +#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ +#define PXVFREQ_PX_MASK 0x7f000000 +#define PXVFREQ_PX_SHIFT 24 +#define VIDFREQ_BASE 0x11110 +#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ +#define VIDFREQ2 0x11114 +#define VIDFREQ3 0x11118 +#define VIDFREQ4 0x1111c +#define VIDFREQ_P0_MASK 0x1f000000 +#define VIDFREQ_P0_SHIFT 24 +#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 +#define VIDFREQ_P0_CSCLK_SHIFT 20 +#define VIDFREQ_P0_CRCLK_MASK 0x000f0000 +#define VIDFREQ_P0_CRCLK_SHIFT 16 +#define VIDFREQ_P1_MASK 0x00001f00 +#define VIDFREQ_P1_SHIFT 8 +#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 +#define VIDFREQ_P1_CSCLK_SHIFT 4 +#define VIDFREQ_P1_CRCLK_MASK 0x0000000f +#define INTTOEXT_BASE_ILK 0x11300 +#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ +#define INTTOEXT_MAP3_SHIFT 24 +#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) +#define INTTOEXT_MAP2_SHIFT 16 +#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) +#define INTTOEXT_MAP1_SHIFT 8 +#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) +#define INTTOEXT_MAP0_SHIFT 0 +#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) +#define MEMSWCTL 0x11170 /* Ironlake only */ +#define MEMCTL_CMD_MASK 0xe000 +#define MEMCTL_CMD_SHIFT 13 +#define MEMCTL_CMD_RCLK_OFF 0 +#define MEMCTL_CMD_RCLK_ON 1 +#define MEMCTL_CMD_CHFREQ 2 +#define MEMCTL_CMD_CHVID 3 +#define MEMCTL_CMD_VMMOFF 4 +#define MEMCTL_CMD_VMMON 5 +#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears + when command complete */ +#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ +#define MEMCTL_FREQ_SHIFT 8 +#define MEMCTL_SFCAVM (1<<7) +#define MEMCTL_TGT_VID_MASK 0x007f +#define MEMIHYST 0x1117c +#define MEMINTREN 0x11180 /* 16 bits */ +#define MEMINT_RSEXIT_EN (1<<8) +#define MEMINT_CX_SUPR_EN (1<<7) +#define MEMINT_CONT_BUSY_EN (1<<6) +#define MEMINT_AVG_BUSY_EN (1<<5) +#define MEMINT_EVAL_CHG_EN (1<<4) +#define MEMINT_MON_IDLE_EN (1<<3) +#define MEMINT_UP_EVAL_EN (1<<2) +#define MEMINT_DOWN_EVAL_EN (1<<1) +#define MEMINT_SW_CMD_EN (1<<0) +#define MEMINTRSTR 0x11182 /* 16 bits */ +#define MEM_RSEXIT_MASK 0xc000 +#define MEM_RSEXIT_SHIFT 14 +#define MEM_CONT_BUSY_MASK 0x3000 +#define MEM_CONT_BUSY_SHIFT 12 +#define MEM_AVG_BUSY_MASK 0x0c00 +#define MEM_AVG_BUSY_SHIFT 
10 +#define MEM_EVAL_CHG_MASK 0x0300 +#define MEM_EVAL_BUSY_SHIFT 8 +#define MEM_MON_IDLE_MASK 0x00c0 +#define MEM_MON_IDLE_SHIFT 6 +#define MEM_UP_EVAL_MASK 0x0030 +#define MEM_UP_EVAL_SHIFT 4 +#define MEM_DOWN_EVAL_MASK 0x000c +#define MEM_DOWN_EVAL_SHIFT 2 +#define MEM_SW_CMD_MASK 0x0003 +#define MEM_INT_STEER_GFX 0 +#define MEM_INT_STEER_CMR 1 +#define MEM_INT_STEER_SMI 2 +#define MEM_INT_STEER_SCI 3 +#define MEMINTRSTS 0x11184 +#define MEMINT_RSEXIT (1<<7) +#define MEMINT_CONT_BUSY (1<<6) +#define MEMINT_AVG_BUSY (1<<5) +#define MEMINT_EVAL_CHG (1<<4) +#define MEMINT_MON_IDLE (1<<3) +#define MEMINT_UP_EVAL (1<<2) +#define MEMINT_DOWN_EVAL (1<<1) +#define MEMINT_SW_CMD (1<<0) +#define MEMMODECTL 0x11190 +#define MEMMODE_BOOST_EN (1<<31) +#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ +#define MEMMODE_BOOST_FREQ_SHIFT 24 +#define MEMMODE_IDLE_MODE_MASK 0x00030000 +#define MEMMODE_IDLE_MODE_SHIFT 16 +#define MEMMODE_IDLE_MODE_EVAL 0 +#define MEMMODE_IDLE_MODE_CONT 1 +#define MEMMODE_HWIDLE_EN (1<<15) +#define MEMMODE_SWMODE_EN (1<<14) +#define MEMMODE_RCLK_GATE (1<<13) +#define MEMMODE_HW_UPDATE (1<<12) +#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ +#define MEMMODE_FSTART_SHIFT 8 +#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ +#define MEMMODE_FMAX_SHIFT 4 +#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ +#define RCBMAXAVG 0x1119c +#define MEMSWCTL2 0x1119e /* Cantiga only */ +#define SWMEMCMD_RENDER_OFF (0 << 13) +#define SWMEMCMD_RENDER_ON (1 << 13) +#define SWMEMCMD_SWFREQ (2 << 13) +#define SWMEMCMD_TARVID (3 << 13) +#define SWMEMCMD_VRM_OFF (4 << 13) +#define SWMEMCMD_VRM_ON (5 << 13) +#define CMDSTS (1<<12) +#define SFCAVM (1<<11) +#define SWFREQ_MASK 0x0380 /* P0-7 */ +#define SWFREQ_SHIFT 7 +#define TARVID_MASK 0x001f +#define MEMSTAT_CTG 0x111a0 +#define RCBMINAVG 0x111a0 +#define RCUPEI 0x111b0 +#define RCDNEI 0x111b4 +#define RSTDBYCTL 0x111b8 +#define RS1EN (1<<31) +#define RS2EN (1<<30) +#define RS3EN (1<<29) +#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ +#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ +#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ +#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ +#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ +#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ +#define RSX_STATUS_MASK (7<<20) +#define RSX_STATUS_ON (0<<20) +#define RSX_STATUS_RC1 (1<<20) +#define RSX_STATUS_RC1E (2<<20) +#define RSX_STATUS_RS1 (3<<20) +#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ +#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ +#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ +#define RSX_STATUS_RSVD2 (7<<20) +#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ +#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ +#define JRSC (1<<17) /* rsx coupled to cpu c-state */ +#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ +#define RS1CONTSAV_MASK (3<<14) +#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ +#define RS1CONTSAV_RSVD (1<<14) +#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ +#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ +#define NORMSLEXLAT_MASK (3<<12) +#define SLOW_RS123 (0<<12) +#define SLOW_RS23 (1<<12) +#define SLOW_RS3 (2<<12) +#define NORMAL_RS123 (3<<12) +#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ +#define IMPROMOEN (1<<10) /* promo is immediate or delayed until 
next idle interval (only for timeout method above) */ +#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ +#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ +#define RS_CSTATE_MASK (3<<4) +#define RS_CSTATE_C367_RS1 (0<<4) +#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) +#define RS_CSTATE_RSVD (2<<4) +#define RS_CSTATE_C367_RS2 (3<<4) +#define REDSAVES (1<<3) /* no context save if was idle during rs0 */ +#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ +#define VIDCTL 0x111c0 +#define VIDSTS 0x111c8 +#define VIDSTART 0x111cc /* 8 bits */ +#define MEMSTAT_ILK 0x111f8 +#define MEMSTAT_VID_MASK 0x7f00 +#define MEMSTAT_VID_SHIFT 8 +#define MEMSTAT_PSTATE_MASK 0x00f8 +#define MEMSTAT_PSTATE_SHIFT 3 +#define MEMSTAT_MON_ACTV (1<<2) +#define MEMSTAT_SRC_CTL_MASK 0x0003 +#define MEMSTAT_SRC_CTL_CORE 0 +#define MEMSTAT_SRC_CTL_TRB 1 +#define MEMSTAT_SRC_CTL_THM 2 +#define MEMSTAT_SRC_CTL_STDBY 3 +#define RCPREVBSYTUPAVG 0x113b8 +#define RCPREVBSYTDNAVG 0x113bc +#define PMMISC 0x11214 +#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ +#define SDEW 0x1124c +#define CSIEW0 0x11250 +#define CSIEW1 0x11254 +#define CSIEW2 0x11258 +#define PEW 0x1125c +#define DEW 0x11270 +#define MCHAFE 0x112c0 +#define CSIEC 0x112e0 +#define DMIEC 0x112e4 +#define DDREC 0x112e8 +#define PEG0EC 0x112ec +#define PEG1EC 0x112f0 +#define GFXEC 0x112f4 +#define RPPREVBSYTUPAVG 0x113b8 +#define RPPREVBSYTDNAVG 0x113bc +#define ECR 0x11600 +#define ECR_GPFE (1<<31) +#define ECR_IMONE (1<<30) +#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ +#define OGW0 0x11608 +#define OGW1 0x1160c +#define EG0 0x11610 +#define EG1 0x11614 +#define EG2 0x11618 +#define EG3 0x1161c +#define EG4 0x11620 +#define EG5 0x11624 +#define EG6 0x11628 +#define EG7 0x1162c +#define PXW 0x11664 +#define PXWL 0x11680 +#define LCFUSE02 0x116c0 +#define LCFUSE_HIV_MASK 0x000000ff +#define CSIPLL0 0x12c10 +#define DDRMPLL1 0X12c20 +#define PEG_BAND_GAP_DATA 0x14d68 + +#define GEN6_GT_THREAD_STATUS_REG 0x13805c +#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 +#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) + +#define GEN6_GT_PERF_STATUS 0x145948 +#define GEN6_RP_STATE_LIMITS 0x145994 +#define GEN6_RP_STATE_CAP 0x145998 + +/* + * Logical Context regs + */ +#define CCID 0x2180 +#define CCID_EN (1<<0) +#define CXT_SIZE 0x21a0 +#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) +#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) +#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) +#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) +#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) +#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ + GEN6_CXT_RING_SIZE(cxt_reg) + \ + GEN6_CXT_RENDER_SIZE(cxt_reg) + \ + GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ + GEN6_CXT_PIPELINE_SIZE(cxt_reg)) +#define GEN7_CXT_SIZE 0x21a8 +#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) +#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) +#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) +#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) +#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) +#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ + GEN7_CXT_RING_SIZE(ctx_reg) + \ + GEN7_CXT_RENDER_SIZE(ctx_reg) + \ + GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ + GEN7_CXT_GT1_SIZE(ctx_reg) + \ + GEN7_CXT_VFSTATE_SIZE(ctx_reg)) +/* 
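+ * On gen6/7 the driver derives the size of the logical context object from
+ * the fields above, roughly (a sketch only, assuming the I915_READ mmio
+ * helper from i915_drv.h and the gen6 scaling factor of 64):
+ *
+ *   ctx_size = GEN6_CXT_TOTAL_SIZE(I915_READ(CXT_SIZE)) * 64;
+ *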
Haswell does have the CXT_SIZE register however it does not appear to be + * valid. Now, docs explain in dwords what is in the context object. The full + * size is 70720 bytes, however, the power context and execlist context will + * never be saved (power context is stored elsewhere, and execlists don't work + * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. + */ +#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) + +/* + * Overlay regs + */ + +#define OVADD 0x30000 +#define DOVSTA 0x30008 +#define OC_BUF (0x3<<20) +#define OGAMC5 0x30010 +#define OGAMC4 0x30014 +#define OGAMC3 0x30018 +#define OGAMC2 0x3001c +#define OGAMC1 0x30020 +#define OGAMC0 0x30024 + +/* + * Display engine regs + */ + +/* Pipe A timing regs */ +#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) +#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) +#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) +#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) +#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) +#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) +#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) +#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) +#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) + +/* Pipe B timing regs */ +#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) +#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) +#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) +#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) +#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) +#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) +#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) +#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) +#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) + + +#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) +#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) +#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) +#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) +#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) +#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) +#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) +#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) + +/* VGA port control */ +#define ADPA 0x61100 +#define PCH_ADPA 0xe1100 +#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) + +#define ADPA_DAC_ENABLE (1<<31) +#define ADPA_DAC_DISABLE 0 +#define ADPA_PIPE_SELECT_MASK (1<<30) +#define ADPA_PIPE_A_SELECT 0 +#define ADPA_PIPE_B_SELECT (1<<30) +#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) +/* CPT uses bits 29:30 for pch transcoder select */ +#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ +#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) +#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) +#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) +#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) +#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) +#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) +#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) +#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) +#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) +#define 
ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) +#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) +#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) +#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) +#define ADPA_USE_VGA_HVPOLARITY (1<<15) +#define ADPA_SETS_HVPOLARITY 0 +#define ADPA_VSYNC_CNTL_DISABLE (1<<10) +#define ADPA_VSYNC_CNTL_ENABLE 0 +#define ADPA_HSYNC_CNTL_DISABLE (1<<11) +#define ADPA_HSYNC_CNTL_ENABLE 0 +#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) +#define ADPA_VSYNC_ACTIVE_LOW 0 +#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) +#define ADPA_HSYNC_ACTIVE_LOW 0 +#define ADPA_DPMS_MASK (~(3<<10)) +#define ADPA_DPMS_ON (0<<10) +#define ADPA_DPMS_SUSPEND (1<<10) +#define ADPA_DPMS_STANDBY (2<<10) +#define ADPA_DPMS_OFF (3<<10) + + +/* Hotplug control (945+ only) */ +#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) +#define PORTB_HOTPLUG_INT_EN (1 << 29) +#define PORTC_HOTPLUG_INT_EN (1 << 28) +#define PORTD_HOTPLUG_INT_EN (1 << 27) +#define SDVOB_HOTPLUG_INT_EN (1 << 26) +#define SDVOC_HOTPLUG_INT_EN (1 << 25) +#define TV_HOTPLUG_INT_EN (1 << 18) +#define CRT_HOTPLUG_INT_EN (1 << 9) +#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \ + PORTC_HOTPLUG_INT_EN | \ + PORTD_HOTPLUG_INT_EN | \ + SDVOC_HOTPLUG_INT_EN | \ + SDVOB_HOTPLUG_INT_EN | \ + CRT_HOTPLUG_INT_EN) +#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) +#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) +/* must use period 64 on GM45 according to docs */ +#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) +#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) +#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) +#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) +#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) +#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) +#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) +#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) + +#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) +/* + * HDMI/DP bits are gen4+ + * + * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. + * Please check the detailed lore in the commit message for for experimental + * evidence. + */ +#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) +#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) +#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) +#define PORTD_HOTPLUG_INT_STATUS (3 << 21) +#define PORTC_HOTPLUG_INT_STATUS (3 << 19) +#define PORTB_HOTPLUG_INT_STATUS (3 << 17) +/* CRT/TV common between gen3+ */ +#define CRT_HOTPLUG_INT_STATUS (1 << 11) +#define TV_HOTPLUG_INT_STATUS (1 << 10) +#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) +#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) +#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) +#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) +/* SDVO is different across gen3/4 */ +#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) +#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) +/* + * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm, + * since reality corrobates that they're the same as on gen3. But keep these + * bits here (and the comment!) to help any other lost wanderers back onto the + * right tracks. 
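+ *
+ * Whichever generation, the bits are consumed by reading PORT_HOTPLUG_STAT
+ * and masking with the per-platform group defined below, e.g. (a sketch only,
+ * assuming the I915_READ mmio helper from i915_drv.h):
+ *
+ *   u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ *   u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;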
+ */ +#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) +#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) +#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) +#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) +#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \ + SDVOB_HOTPLUG_INT_STATUS_G4X | \ + SDVOC_HOTPLUG_INT_STATUS_G4X | \ + PORTB_HOTPLUG_INT_STATUS | \ + PORTC_HOTPLUG_INT_STATUS | \ + PORTD_HOTPLUG_INT_STATUS) + +#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \ + SDVOB_HOTPLUG_INT_STATUS_I915 | \ + SDVOC_HOTPLUG_INT_STATUS_I915 | \ + PORTB_HOTPLUG_INT_STATUS | \ + PORTC_HOTPLUG_INT_STATUS | \ + PORTD_HOTPLUG_INT_STATUS) + +/* SDVO and HDMI port control. + * The same register may be used for SDVO or HDMI */ +#define GEN3_SDVOB 0x61140 +#define GEN3_SDVOC 0x61160 +#define GEN4_HDMIB GEN3_SDVOB +#define GEN4_HDMIC GEN3_SDVOC +#define PCH_SDVOB 0xe1140 +#define PCH_HDMIB PCH_SDVOB +#define PCH_HDMIC 0xe1150 +#define PCH_HDMID 0xe1160 + +/* Gen 3 SDVO bits: */ +#define SDVO_ENABLE (1 << 31) +#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) +#define SDVO_PIPE_SEL_MASK (1 << 30) +#define SDVO_PIPE_B_SELECT (1 << 30) +#define SDVO_STALL_SELECT (1 << 29) +#define SDVO_INTERRUPT_ENABLE (1 << 26) +/** + * 915G/GM SDVO pixel multiplier. + * Programmed value is multiplier - 1, up to 5x. + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +#define SDVO_PORT_MULTIPLY_MASK (7 << 23) +#define SDVO_PORT_MULTIPLY_SHIFT 23 +#define SDVO_PHASE_SELECT_MASK (15 << 19) +#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) +#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) +#define SDVOC_GANG_MODE (1 << 16) /* Port C only */ +#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */ +#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */ +#define SDVO_DETECTED (1 << 2) +/* Bits to be preserved when writing */ +#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \ + SDVO_INTERRUPT_ENABLE) +#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE) + +/* Gen 4 SDVO/HDMI bits: */ +#define SDVO_COLOR_FORMAT_8bpc (0 << 26) +#define SDVO_ENCODING_SDVO (0 << 10) +#define SDVO_ENCODING_HDMI (2 << 10) +#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ +#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */ +#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */ +#define SDVO_AUDIO_ENABLE (1 << 6) +/* VSYNC/HSYNC bits new with 965, default is to be set */ +#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) +#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) + +/* Gen 5 (IBX) SDVO/HDMI bits: */ +#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */ +#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */ + +/* Gen 6 (CPT) SDVO/HDMI bits: */ +#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) +#define SDVO_PIPE_SEL_MASK_CPT (3 << 29) + + +/* DVO port control */ +#define DVOA 0x61120 +#define DVOB 0x61140 +#define DVOC 0x61160 +#define DVO_ENABLE (1 << 31) +#define DVO_PIPE_B_SELECT (1 << 30) +#define DVO_PIPE_STALL_UNUSED (0 << 28) +#define DVO_PIPE_STALL (1 << 28) +#define DVO_PIPE_STALL_TV (2 << 28) +#define DVO_PIPE_STALL_MASK (3 << 28) +#define DVO_USE_VGA_SYNC (1 << 15) +#define DVO_DATA_ORDER_I740 (0 << 14) +#define DVO_DATA_ORDER_FP (1 << 14) +#define DVO_VSYNC_DISABLE (1 << 11) +#define DVO_HSYNC_DISABLE (1 << 10) +#define DVO_VSYNC_TRISTATE (1 << 9) +#define DVO_HSYNC_TRISTATE (1 << 8) +#define DVO_BORDER_ENABLE (1 << 7) +#define DVO_DATA_ORDER_GBRG (1 << 6) +#define DVO_DATA_ORDER_RGGB (0 << 6) +#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) +#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) +#define DVO_VSYNC_ACTIVE_HIGH (1 << 4) +#define 
DVO_HSYNC_ACTIVE_HIGH (1 << 3) +#define DVO_BLANK_ACTIVE_HIGH (1 << 2) +#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ +#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ +#define DVO_PRESERVE_MASK (0x7<<24) +#define DVOA_SRCDIM 0x61124 +#define DVOB_SRCDIM 0x61144 +#define DVOC_SRCDIM 0x61164 +#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 +#define DVO_SRCDIM_VERTICAL_SHIFT 0 + +/* LVDS port control */ +#define LVDS 0x61180 +/* + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +#define LVDS_PORT_EN (1 << 31) +/* Selects pipe B for LVDS data. Must be set on pre-965. */ +#define LVDS_PIPEB_SELECT (1 << 30) +#define LVDS_PIPE_MASK (1 << 30) +#define LVDS_PIPE(pipe) ((pipe) << 30) +/* LVDS dithering flag on 965/g4x platform */ +#define LVDS_ENABLE_DITHER (1 << 25) +/* LVDS sync polarity flags. Set to invert (i.e. negative) */ +#define LVDS_VSYNC_POLARITY (1 << 21) +#define LVDS_HSYNC_POLARITY (1 << 20) + +/* Enable border for unscaled (or aspect-scaled) display */ +#define LVDS_BORDER_ENABLE (1 << 15) +/* + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. + */ +#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) +#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) +#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) +/* + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +#define LVDS_A3_POWER_MASK (3 << 6) +#define LVDS_A3_POWER_DOWN (0 << 6) +#define LVDS_A3_POWER_UP (3 << 6) +/* + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +#define LVDS_CLKB_POWER_MASK (3 << 4) +#define LVDS_CLKB_POWER_DOWN (0 << 4) +#define LVDS_CLKB_POWER_UP (3 << 4) +/* + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. + */ +#define LVDS_B0B3_POWER_MASK (3 << 2) +#define LVDS_B0B3_POWER_DOWN (0 << 2) +#define LVDS_B0B3_POWER_UP (3 << 2) + +/* Video Data Island Packet control */ +#define VIDEO_DIP_DATA 0x61178 +/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC + * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte + * of the infoframe structure specified by CEA-861. 
*/ +#define VIDEO_DIP_DATA_SIZE 32 +#define VIDEO_DIP_CTL 0x61170 +/* Pre HSW: */ +#define VIDEO_DIP_ENABLE (1 << 31) +#define VIDEO_DIP_PORT_B (1 << 29) +#define VIDEO_DIP_PORT_C (2 << 29) +#define VIDEO_DIP_PORT_D (3 << 29) +#define VIDEO_DIP_PORT_MASK (3 << 29) +#define VIDEO_DIP_ENABLE_GCP (1 << 25) +#define VIDEO_DIP_ENABLE_AVI (1 << 21) +#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) +#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) +#define VIDEO_DIP_ENABLE_SPD (8 << 21) +#define VIDEO_DIP_SELECT_AVI (0 << 19) +#define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define VIDEO_DIP_SELECT_SPD (3 << 19) +#define VIDEO_DIP_SELECT_MASK (3 << 19) +#define VIDEO_DIP_FREQ_ONCE (0 << 16) +#define VIDEO_DIP_FREQ_VSYNC (1 << 16) +#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) +#define VIDEO_DIP_FREQ_MASK (3 << 16) +/* HSW and later: */ +#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) +#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) +#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) +#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8) +#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) +#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) + +/* Panel power sequencing */ +#define PP_STATUS 0x61200 +#define PP_ON (1 << 31) +/* + * Indicates that all dependencies of the panel are on: + * + * - PLL enabled + * - pipe enabled + * - LVDS/DVOB/DVOC on + */ +#define PP_READY (1 << 30) +#define PP_SEQUENCE_NONE (0 << 28) +#define PP_SEQUENCE_POWER_UP (1 << 28) +#define PP_SEQUENCE_POWER_DOWN (2 << 28) +#define PP_SEQUENCE_MASK (3 << 28) +#define PP_SEQUENCE_SHIFT 28 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_MASK 0x0000000f +#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) +#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) +#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) +#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) +#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) +#define PP_SEQUENCE_STATE_RESET (0xf << 0) +#define PP_CONTROL 0x61204 +#define POWER_TARGET_ON (1 << 0) +#define PP_ON_DELAYS 0x61208 +#define PP_OFF_DELAYS 0x6120c +#define PP_DIVISOR 0x61210 + +/* Panel fitting */ +#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) +#define PFIT_ENABLE (1 << 31) +#define PFIT_PIPE_MASK (3 << 29) +#define PFIT_PIPE_SHIFT 29 +#define VERT_INTERP_DISABLE (0 << 10) +#define VERT_INTERP_BILINEAR (1 << 10) +#define VERT_INTERP_MASK (3 << 10) +#define VERT_AUTO_SCALE (1 << 9) +#define HORIZ_INTERP_DISABLE (0 << 6) +#define HORIZ_INTERP_BILINEAR (1 << 6) +#define HORIZ_INTERP_MASK (3 << 6) +#define HORIZ_AUTO_SCALE (1 << 5) +#define PANEL_8TO6_DITHER_ENABLE (1 << 3) +#define PFIT_FILTER_FUZZY (0 << 24) +#define PFIT_SCALING_AUTO (0 << 26) +#define PFIT_SCALING_PROGRAMMED (1 << 26) +#define PFIT_SCALING_PILLAR (2 << 26) +#define PFIT_SCALING_LETTER (3 << 26) +#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) +/* Pre-965 */ +#define PFIT_VERT_SCALE_SHIFT 20 +#define PFIT_VERT_SCALE_MASK 0xfff00000 +#define PFIT_HORIZ_SCALE_SHIFT 4 +#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 +/* 965+ */ +#define PFIT_VERT_SCALE_SHIFT_965 16 +#define PFIT_VERT_SCALE_MASK_965 0x1fff0000 +#define PFIT_HORIZ_SCALE_SHIFT_965 0 +#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff + +#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) + +/* Backlight control */ +#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ +#define BLM_PWM_ENABLE (1 << 31) +#define BLM_COMBINATION_MODE (1 << 
30) /* gen4 only */ +#define BLM_PIPE_SELECT (1 << 29) +#define BLM_PIPE_SELECT_IVB (3 << 29) +#define BLM_PIPE_A (0 << 29) +#define BLM_PIPE_B (1 << 29) +#define BLM_PIPE_C (2 << 29) /* ivb + */ +#define BLM_PIPE(pipe) ((pipe) << 29) +#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ +#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) +#define BLM_PHASE_IN_ENABLE (1 << 25) +#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) +#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) +#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) +#define BLM_PHASE_IN_COUNT_SHIFT (8) +#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) +#define BLM_PHASE_IN_INCR_SHIFT (0) +#define BLM_PHASE_IN_INCR_MASK (0xff << 0) +#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254) +/* + * This is the most significant 15 bits of the number of backlight cycles in a + * complete cycle of the modulated backlight control. + * + * The actual value is this field multiplied by two. + */ +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ +/* + * This is the number of cycles out of the backlight modulation cycle for which + * the backlight is on. + * + * This field must be no greater than the number of cycles in the complete + * backlight modulation cycle. + */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) +#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) +#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ + +#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260) + +/* New registers for PCH-split platforms. Safe where new bits show up, the + * register layout machtes with gen4 BLC_PWM_CTL[12]. */ +#define BLC_PWM_CPU_CTL2 0x48250 +#define BLC_PWM_CPU_CTL 0x48254 + +/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is + * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ +#define BLC_PWM_PCH_CTL1 0xc8250 +#define BLM_PCH_PWM_ENABLE (1 << 31) +#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) +#define BLM_PCH_POLARITY (1 << 29) +#define BLC_PWM_PCH_CTL2 0xc8254 + +/* TV port control */ +#define TV_CTL 0x68000 +/** Enables the TV encoder */ +# define TV_ENC_ENABLE (1 << 31) +/** Sources the TV encoder input from pipe B instead of A. */ +# define TV_ENC_PIPEB_SELECT (1 << 30) +/** Outputs composite video (DAC A only) */ +# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) +/** Outputs SVideo video (DAC B/C) */ +# define TV_ENC_OUTPUT_SVIDEO (1 << 28) +/** Outputs Component video (DAC A/B/C) */ +# define TV_ENC_OUTPUT_COMPONENT (2 << 28) +/** Outputs Composite and SVideo (DAC A/B/C) */ +# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) +# define TV_TRILEVEL_SYNC (1 << 21) +/** Enables slow sync generation (945GM only) */ +# define TV_SLOW_SYNC (1 << 20) +/** Selects 4x oversampling for 480i and 576p */ +# define TV_OVERSAMPLE_4X (0 << 18) +/** Selects 2x oversampling for 720p and 1080i */ +# define TV_OVERSAMPLE_2X (1 << 18) +/** Selects no oversampling for 1080p */ +# define TV_OVERSAMPLE_NONE (2 << 18) +/** Selects 8x oversampling */ +# define TV_OVERSAMPLE_8X (3 << 18) +/** Selects progressive mode rather than interlaced */ +# define TV_PROGRESSIVE (1 << 17) +/** Sets the colorburst to PAL mode. Required for non-M PAL modes. 
*/ +# define TV_PAL_BURST (1 << 16) +/** Field for setting delay of Y compared to C */ +# define TV_YC_SKEW_MASK (7 << 12) +/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ +# define TV_ENC_SDP_FIX (1 << 11) +/** + * Enables a fix for the 915GM only. + * + * Not sure what it does. + */ +# define TV_ENC_C0_FIX (1 << 10) +/** Bits that must be preserved by software */ +# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) +# define TV_FUSE_STATE_MASK (3 << 4) +/** Read-only state that reports all features enabled */ +# define TV_FUSE_STATE_ENABLED (0 << 4) +/** Read-only state that reports that Macrovision is disabled in hardware*/ +# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) +/** Read-only state that reports that TV-out is disabled in hardware. */ +# define TV_FUSE_STATE_DISABLED (2 << 4) +/** Normal operation */ +# define TV_TEST_MODE_NORMAL (0 << 0) +/** Encoder test pattern 1 - combo pattern */ +# define TV_TEST_MODE_PATTERN_1 (1 << 0) +/** Encoder test pattern 2 - full screen vertical 75% color bars */ +# define TV_TEST_MODE_PATTERN_2 (2 << 0) +/** Encoder test pattern 3 - full screen horizontal 75% color bars */ +# define TV_TEST_MODE_PATTERN_3 (3 << 0) +/** Encoder test pattern 4 - random noise */ +# define TV_TEST_MODE_PATTERN_4 (4 << 0) +/** Encoder test pattern 5 - linear color ramps */ +# define TV_TEST_MODE_PATTERN_5 (5 << 0) +/** + * This test mode forces the DACs to 50% of full output. + * + * This is used for load detection in combination with TVDAC_SENSE_MASK + */ +# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) +# define TV_TEST_MODE_MASK (7 << 0) + +#define TV_DAC 0x68004 +# define TV_DAC_SAVE 0x00ffff00 +/** + * Reports that DAC state change logic has reported change (RO). + * + * This gets cleared when TV_DAC_STATE_EN is cleared +*/ +# define TVDAC_STATE_CHG (1 << 31) +# define TVDAC_SENSE_MASK (7 << 28) +/** Reports that DAC A voltage is above the detect threshold */ +# define TVDAC_A_SENSE (1 << 30) +/** Reports that DAC B voltage is above the detect threshold */ +# define TVDAC_B_SENSE (1 << 29) +/** Reports that DAC C voltage is above the detect threshold */ +# define TVDAC_C_SENSE (1 << 28) +/** + * Enables DAC state detection logic, for load-based TV detection. + * + * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set + * to off, for load detection to work. + */ +# define TVDAC_STATE_CHG_EN (1 << 27) +/** Sets the DAC A sense value to high */ +# define TVDAC_A_SENSE_CTL (1 << 26) +/** Sets the DAC B sense value to high */ +# define TVDAC_B_SENSE_CTL (1 << 25) +/** Sets the DAC C sense value to high */ +# define TVDAC_C_SENSE_CTL (1 << 24) +/** Overrides the ENC_ENABLE and DAC voltage levels */ +# define DAC_CTL_OVERRIDE (1 << 7) +/** Sets the slew rate. Must be preserved in software */ +# define ENC_TVDAC_SLEW_FAST (1 << 6) +# define DAC_A_1_3_V (0 << 4) +# define DAC_A_1_1_V (1 << 4) +# define DAC_A_0_7_V (2 << 4) +# define DAC_A_MASK (3 << 4) +# define DAC_B_1_3_V (0 << 2) +# define DAC_B_1_1_V (1 << 2) +# define DAC_B_0_7_V (2 << 2) +# define DAC_B_MASK (3 << 2) +# define DAC_C_1_3_V (0 << 0) +# define DAC_C_1_1_V (1 << 0) +# define DAC_C_0_7_V (2 << 0) +# define DAC_C_MASK (3 << 0) + +/** + * CSC coefficients are stored in a floating point format with 9 bits of + * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, + * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with + * -1 (0x3) being the only legal negative value. 
+ */ +#define TV_CSC_Y 0x68010 +# define TV_RY_MASK 0x07ff0000 +# define TV_RY_SHIFT 16 +# define TV_GY_MASK 0x00000fff +# define TV_GY_SHIFT 0 + +#define TV_CSC_Y2 0x68014 +# define TV_BY_MASK 0x07ff0000 +# define TV_BY_SHIFT 16 +/** + * Y attenuation for component video. + * + * Stored in 1.9 fixed point. + */ +# define TV_AY_MASK 0x000003ff +# define TV_AY_SHIFT 0 + +#define TV_CSC_U 0x68018 +# define TV_RU_MASK 0x07ff0000 +# define TV_RU_SHIFT 16 +# define TV_GU_MASK 0x000007ff +# define TV_GU_SHIFT 0 + +#define TV_CSC_U2 0x6801c +# define TV_BU_MASK 0x07ff0000 +# define TV_BU_SHIFT 16 +/** + * U attenuation for component video. + * + * Stored in 1.9 fixed point. + */ +# define TV_AU_MASK 0x000003ff +# define TV_AU_SHIFT 0 + +#define TV_CSC_V 0x68020 +# define TV_RV_MASK 0x0fff0000 +# define TV_RV_SHIFT 16 +# define TV_GV_MASK 0x000007ff +# define TV_GV_SHIFT 0 + +#define TV_CSC_V2 0x68024 +# define TV_BV_MASK 0x07ff0000 +# define TV_BV_SHIFT 16 +/** + * V attenuation for component video. + * + * Stored in 1.9 fixed point. + */ +# define TV_AV_MASK 0x000007ff +# define TV_AV_SHIFT 0 + +#define TV_CLR_KNOBS 0x68028 +/** 2s-complement brightness adjustment */ +# define TV_BRIGHTNESS_MASK 0xff000000 +# define TV_BRIGHTNESS_SHIFT 24 +/** Contrast adjustment, as a 2.6 unsigned floating point number */ +# define TV_CONTRAST_MASK 0x00ff0000 +# define TV_CONTRAST_SHIFT 16 +/** Saturation adjustment, as a 2.6 unsigned floating point number */ +# define TV_SATURATION_MASK 0x0000ff00 +# define TV_SATURATION_SHIFT 8 +/** Hue adjustment, as an integer phase angle in degrees */ +# define TV_HUE_MASK 0x000000ff +# define TV_HUE_SHIFT 0 + +#define TV_CLR_LEVEL 0x6802c +/** Controls the DAC level for black */ +# define TV_BLACK_LEVEL_MASK 0x01ff0000 +# define TV_BLACK_LEVEL_SHIFT 16 +/** Controls the DAC level for blanking */ +# define TV_BLANK_LEVEL_MASK 0x000001ff +# define TV_BLANK_LEVEL_SHIFT 0 + +#define TV_H_CTL_1 0x68030 +/** Number of pixels in the hsync. */ +# define TV_HSYNC_END_MASK 0x1fff0000 +# define TV_HSYNC_END_SHIFT 16 +/** Total number of pixels minus one in the line (display and blanking). */ +# define TV_HTOTAL_MASK 0x00001fff +# define TV_HTOTAL_SHIFT 0 + +#define TV_H_CTL_2 0x68034 +/** Enables the colorburst (needed for non-component color) */ +# define TV_BURST_ENA (1 << 31) +/** Offset of the colorburst from the start of hsync, in pixels minus one. */ +# define TV_HBURST_START_SHIFT 16 +# define TV_HBURST_START_MASK 0x1fff0000 +/** Length of the colorburst */ +# define TV_HBURST_LEN_SHIFT 0 +# define TV_HBURST_LEN_MASK 0x0001fff + +#define TV_H_CTL_3 0x68038 +/** End of hblank, measured in pixels minus one from start of hsync */ +# define TV_HBLANK_END_SHIFT 16 +# define TV_HBLANK_END_MASK 0x1fff0000 +/** Start of hblank, measured in pixels minus one from start of hsync */ +# define TV_HBLANK_START_SHIFT 0 +# define TV_HBLANK_START_MASK 0x0001fff + +#define TV_V_CTL_1 0x6803c +/** XXX */ +# define TV_NBR_END_SHIFT 16 +# define TV_NBR_END_MASK 0x07ff0000 +/** XXX */ +# define TV_VI_END_F1_SHIFT 8 +# define TV_VI_END_F1_MASK 0x00003f00 +/** XXX */ +# define TV_VI_END_F2_SHIFT 0 +# define TV_VI_END_F2_MASK 0x0000003f + +#define TV_V_CTL_2 0x68040 +/** Length of vsync, in half lines */ +# define TV_VSYNC_LEN_MASK 0x07ff0000 +# define TV_VSYNC_LEN_SHIFT 16 +/** Offset of the start of vsync in field 1, measured in one less than the + * number of half lines. 
+ */ +# define TV_VSYNC_START_F1_MASK 0x00007f00 +# define TV_VSYNC_START_F1_SHIFT 8 +/** + * Offset of the start of vsync in field 2, measured in one less than the + * number of half lines. + */ +# define TV_VSYNC_START_F2_MASK 0x0000007f +# define TV_VSYNC_START_F2_SHIFT 0 + +#define TV_V_CTL_3 0x68044 +/** Enables generation of the equalization signal */ +# define TV_EQUAL_ENA (1 << 31) +/** Length of vsync, in half lines */ +# define TV_VEQ_LEN_MASK 0x007f0000 +# define TV_VEQ_LEN_SHIFT 16 +/** Offset of the start of equalization in field 1, measured in one less than + * the number of half lines. + */ +# define TV_VEQ_START_F1_MASK 0x0007f00 +# define TV_VEQ_START_F1_SHIFT 8 +/** + * Offset of the start of equalization in field 2, measured in one less than + * the number of half lines. + */ +# define TV_VEQ_START_F2_MASK 0x000007f +# define TV_VEQ_START_F2_SHIFT 0 + +#define TV_V_CTL_4 0x68048 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F1_MASK 0x003f0000 +# define TV_VBURST_START_F1_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F1_MASK 0x000000ff +# define TV_VBURST_END_F1_SHIFT 0 + +#define TV_V_CTL_5 0x6804c +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F2_MASK 0x003f0000 +# define TV_VBURST_START_F2_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F2_MASK 0x000000ff +# define TV_VBURST_END_F2_SHIFT 0 + +#define TV_V_CTL_6 0x68050 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F3_MASK 0x003f0000 +# define TV_VBURST_START_F3_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F3_MASK 0x000000ff +# define TV_VBURST_END_F3_SHIFT 0 + +#define TV_V_CTL_7 0x68054 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F4_MASK 0x003f0000 +# define TV_VBURST_START_F4_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. 
+ */ +# define TV_VBURST_END_F4_MASK 0x000000ff +# define TV_VBURST_END_F4_SHIFT 0 + +#define TV_SC_CTL_1 0x68060 +/** Turns on the first subcarrier phase generation DDA */ +# define TV_SC_DDA1_EN (1 << 31) +/** Turns on the second subcarrier phase generation DDA */ +# define TV_SC_DDA2_EN (1 << 30) +/** Turns on the third subcarrier phase generation DDA */ +# define TV_SC_DDA3_EN (1 << 29) +/** Sets the subcarrier DDA to reset frequency every other field */ +# define TV_SC_RESET_EVERY_2 (0 << 24) +/** Sets the subcarrier DDA to reset frequency every fourth field */ +# define TV_SC_RESET_EVERY_4 (1 << 24) +/** Sets the subcarrier DDA to reset frequency every eighth field */ +# define TV_SC_RESET_EVERY_8 (2 << 24) +/** Sets the subcarrier DDA to never reset the frequency */ +# define TV_SC_RESET_NEVER (3 << 24) +/** Sets the peak amplitude of the colorburst. */ +# define TV_BURST_LEVEL_MASK 0x00ff0000 +# define TV_BURST_LEVEL_SHIFT 16 +/** Sets the increment of the first subcarrier phase generation DDA */ +# define TV_SCDDA1_INC_MASK 0x00000fff +# define TV_SCDDA1_INC_SHIFT 0 + +#define TV_SC_CTL_2 0x68064 +/** Sets the rollover for the second subcarrier phase generation DDA */ +# define TV_SCDDA2_SIZE_MASK 0x7fff0000 +# define TV_SCDDA2_SIZE_SHIFT 16 +/** Sets the increment of the second subcarrier phase generation DDA */ +# define TV_SCDDA2_INC_MASK 0x00007fff +# define TV_SCDDA2_INC_SHIFT 0 + +#define TV_SC_CTL_3 0x68068 +/** Sets the rollover for the third subcarrier phase generation DDA */ +# define TV_SCDDA3_SIZE_MASK 0x7fff0000 +# define TV_SCDDA3_SIZE_SHIFT 16 +/** Sets the increment of the third subcarrier phase generation DDA */ +# define TV_SCDDA3_INC_MASK 0x00007fff +# define TV_SCDDA3_INC_SHIFT 0 + +#define TV_WIN_POS 0x68070 +/** X coordinate of the display from the start of horizontal active */ +# define TV_XPOS_MASK 0x1fff0000 +# define TV_XPOS_SHIFT 16 +/** Y coordinate of the display from the start of vertical active (NBR) */ +# define TV_YPOS_MASK 0x00000fff +# define TV_YPOS_SHIFT 0 + +#define TV_WIN_SIZE 0x68074 +/** Horizontal size of the display window, measured in pixels */ +# define TV_XSIZE_MASK 0x1fff0000 +# define TV_XSIZE_SHIFT 16 +/** + * Vertical size of the display window, measured in pixels. + * + * Must be even for interlaced modes. + */ +# define TV_YSIZE_MASK 0x00000fff +# define TV_YSIZE_SHIFT 0 + +#define TV_FILTER_CTL_1 0x68080 +/** + * Enables automatic scaling calculation. + * + * If set, the rest of the registers are ignored, and the calculated values can + * be read back from the register. + */ +# define TV_AUTO_SCALE (1 << 31) +/** + * Disables the vertical filter. + * + * This is required on modes more than 1024 pixels wide */ +# define TV_V_FILTER_BYPASS (1 << 29) +/** Enables adaptive vertical filtering */ +# define TV_VADAPT (1 << 28) +# define TV_VADAPT_MODE_MASK (3 << 26) +/** Selects the least adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_LEAST (0 << 26) +/** Selects the moderately adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_MODERATE (1 << 26) +/** Selects the most adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_MOST (3 << 26) +/** + * Sets the horizontal scaling factor. + * + * This should be the fractional part of the horizontal scaling factor divided + * by the oversampling rate.
TV_HSCALE should be less than 1, and set to: + * + * (src width - 1) / ((oversample * dest width) - 1) + */ +# define TV_HSCALE_FRAC_MASK 0x00003fff +# define TV_HSCALE_FRAC_SHIFT 0 + +#define TV_FILTER_CTL_2 0x68084 +/** + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. + * + * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) + */ +# define TV_VSCALE_INT_MASK 0x00038000 +# define TV_VSCALE_INT_SHIFT 15 +/** + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. + * + * \sa TV_VSCALE_INT_MASK + */ +# define TV_VSCALE_FRAC_MASK 0x00007fff +# define TV_VSCALE_FRAC_SHIFT 0 + +#define TV_FILTER_CTL_3 0x68088 +/** + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. + * + * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) + * + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. + */ +# define TV_VSCALE_IP_INT_MASK 0x00038000 +# define TV_VSCALE_IP_INT_SHIFT 15 +/** + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. + * + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. + * + * \sa TV_VSCALE_IP_INT_MASK + */ +# define TV_VSCALE_IP_FRAC_MASK 0x00007fff +# define TV_VSCALE_IP_FRAC_SHIFT 0 + +#define TV_CC_CONTROL 0x68090 +# define TV_CC_ENABLE (1 << 31) +/** + * Specifies which field to send the CC data in. + * + * CC data is usually sent in field 0. + */ +# define TV_CC_FID_MASK (1 << 27) +# define TV_CC_FID_SHIFT 27 +/** Sets the horizontal position of the CC data. Usually 135. */ +# define TV_CC_HOFF_MASK 0x03ff0000 +# define TV_CC_HOFF_SHIFT 16 +/** Sets the vertical position of the CC data. Usually 21 */ +# define TV_CC_LINE_MASK 0x0000003f +# define TV_CC_LINE_SHIFT 0 + +#define TV_CC_DATA 0x68094 +# define TV_CC_RDY (1 << 31) +/** Second word of CC data to be transmitted. */ +# define TV_CC_DATA_2_MASK 0x007f0000 +# define TV_CC_DATA_2_SHIFT 16 +/** First word of CC data to be transmitted. */ +# define TV_CC_DATA_1_MASK 0x0000007f +# define TV_CC_DATA_1_SHIFT 0 + +#define TV_H_LUMA_0 0x68100 +#define TV_H_LUMA_59 0x681ec +#define TV_H_CHROMA_0 0x68200 +#define TV_H_CHROMA_59 0x682ec +#define TV_V_LUMA_0 0x68300 +#define TV_V_LUMA_42 0x683a8 +#define TV_V_CHROMA_0 0x68400 +#define TV_V_CHROMA_42 0x684a8 + +/* Display Port */ +#define DP_A 0x64000 /* eDP */ +#define DP_B 0x64100 +#define DP_C 0x64200 +#define DP_D 0x64300 + +#define DP_PORT_EN (1 << 31) +#define DP_PIPEB_SELECT (1 << 30) +#define DP_PIPE_MASK (1 << 30) + +/* Link training mode - select a suitable mode for each stage */ +#define DP_LINK_TRAIN_PAT_1 (0 << 28) +#define DP_LINK_TRAIN_PAT_2 (1 << 28) +#define DP_LINK_TRAIN_PAT_IDLE (2 << 28) +#define DP_LINK_TRAIN_OFF (3 << 28) +#define DP_LINK_TRAIN_MASK (3 << 28) +#define DP_LINK_TRAIN_SHIFT 28 + +/* CPT Link training mode */ +#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) +#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8) +#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8) +#define DP_LINK_TRAIN_OFF_CPT (3 << 8) +#define DP_LINK_TRAIN_MASK_CPT (7 << 8) +#define DP_LINK_TRAIN_SHIFT_CPT 8 + +/* Signal voltages. 
These are mostly controlled by the other end */ +#define DP_VOLTAGE_0_4 (0 << 25) +#define DP_VOLTAGE_0_6 (1 << 25) +#define DP_VOLTAGE_0_8 (2 << 25) +#define DP_VOLTAGE_1_2 (3 << 25) +#define DP_VOLTAGE_MASK (7 << 25) +#define DP_VOLTAGE_SHIFT 25 + +/* Signal pre-emphasis levels, like voltages, the other end tells us what + * they want + */ +#define DP_PRE_EMPHASIS_0 (0 << 22) +#define DP_PRE_EMPHASIS_3_5 (1 << 22) +#define DP_PRE_EMPHASIS_6 (2 << 22) +#define DP_PRE_EMPHASIS_9_5 (3 << 22) +#define DP_PRE_EMPHASIS_MASK (7 << 22) +#define DP_PRE_EMPHASIS_SHIFT 22 + +/* How many wires to use. I guess 3 was too hard */ +#define DP_PORT_WIDTH_1 (0 << 19) +#define DP_PORT_WIDTH_2 (1 << 19) +#define DP_PORT_WIDTH_4 (3 << 19) +#define DP_PORT_WIDTH_MASK (7 << 19) + +/* Mystic DPCD version 1.1 special mode */ +#define DP_ENHANCED_FRAMING (1 << 18) + +/* eDP */ +#define DP_PLL_FREQ_270MHZ (0 << 16) +#define DP_PLL_FREQ_160MHZ (1 << 16) +#define DP_PLL_FREQ_MASK (3 << 16) + +/** locked once port is enabled */ +#define DP_PORT_REVERSAL (1 << 15) + +/* eDP */ +#define DP_PLL_ENABLE (1 << 14) + +/** sends the clock on lane 15 of the PEG for debug */ +#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) + +#define DP_SCRAMBLING_DISABLE (1 << 12) +#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) + +/** limit RGB values to avoid confusing TVs */ +#define DP_COLOR_RANGE_16_235 (1 << 8) + +/** Turn on the audio link */ +#define DP_AUDIO_OUTPUT_ENABLE (1 << 6) + +/** vs and hs sync polarity */ +#define DP_SYNC_VS_HIGH (1 << 4) +#define DP_SYNC_HS_HIGH (1 << 3) + +/** A fantasy */ +#define DP_DETECTED (1 << 2) + +/** The aux channel provides a way to talk to the + * signal sink for DDC etc. Max packet size supported + * is 20 bytes in each direction, hence the 5 fixed + * data registers + */ +#define DPA_AUX_CH_CTL 0x64010 +#define DPA_AUX_CH_DATA1 0x64014 +#define DPA_AUX_CH_DATA2 0x64018 +#define DPA_AUX_CH_DATA3 0x6401c +#define DPA_AUX_CH_DATA4 0x64020 +#define DPA_AUX_CH_DATA5 0x64024 + +#define DPB_AUX_CH_CTL 0x64110 +#define DPB_AUX_CH_DATA1 0x64114 +#define DPB_AUX_CH_DATA2 0x64118 +#define DPB_AUX_CH_DATA3 0x6411c +#define DPB_AUX_CH_DATA4 0x64120 +#define DPB_AUX_CH_DATA5 0x64124 + +#define DPC_AUX_CH_CTL 0x64210 +#define DPC_AUX_CH_DATA1 0x64214 +#define DPC_AUX_CH_DATA2 0x64218 +#define DPC_AUX_CH_DATA3 0x6421c +#define DPC_AUX_CH_DATA4 0x64220 +#define DPC_AUX_CH_DATA5 0x64224 + +#define DPD_AUX_CH_CTL 0x64310 +#define DPD_AUX_CH_DATA1 0x64314 +#define DPD_AUX_CH_DATA2 0x64318 +#define DPD_AUX_CH_DATA3 0x6431c +#define DPD_AUX_CH_DATA4 0x64320 +#define DPD_AUX_CH_DATA5 0x64324 + +#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) +#define DP_AUX_CH_CTL_DONE (1 << 30) +#define DP_AUX_CH_CTL_INTERRUPT (1 << 29) +#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) +#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) +#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 +#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) +#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 +#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) +#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) +#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) +#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) +#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 
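+
+/*
+ * Example usage sketch for the AUX channel registers above (illustrative
+ * only, not part of this patch): an AUX write of up to 20 bytes packs the
+ * payload into the five data registers, 4 bytes per register, programs the
+ * message size and timeout, sets SEND_BUSY and then polls for DONE.  The
+ * send/send_bytes buffer, the pack_aux() big-endian packing helper and the
+ * precharge/aux_clock_divider values are assumed placeholders here; the
+ * real transfer code (intel_dp.c) additionally handles retries, receive
+ * errors and the per-port CTL/DATA register blocks.
+ *
+ *	for (i = 0; i < send_bytes; i += 4)
+ *		I915_WRITE(DPA_AUX_CH_DATA1 + i,
+ *			   pack_aux(send + i, send_bytes - i));
+ *	I915_WRITE(DPA_AUX_CH_CTL,
+ *		   DP_AUX_CH_CTL_SEND_BUSY |
+ *		   DP_AUX_CH_CTL_DONE |
+ *		   DP_AUX_CH_CTL_TIME_OUT_400us |
+ *		   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ *		   DP_AUX_CH_CTL_RECEIVE_ERROR |
+ *		   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ *		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ *		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+ *	while ((I915_READ(DPA_AUX_CH_CTL) & DP_AUX_CH_CTL_DONE) == 0)
+ *		cpu_relax();
+ */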
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 + +/* + * Computing GMCH M and N values for the Display Port link + * + * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes + * + * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) + * + * The GMCH value is used internally + * + * bytes_per_pixel is the number of bytes coming out of the plane, + * which is after the LUTs, so we want the bytes for our color format. + * For our current usage, this is always 3, one byte for R, G and B. + */ +#define _PIPEA_GMCH_DATA_M 0x70050 +#define _PIPEB_GMCH_DATA_M 0x71050 + +/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ +#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ +#define TU_SIZE_MASK (0x3f << 25) + +#define DATA_LINK_M_N_MASK (0xffffff) +#define DATA_LINK_N_MAX (0x800000) + +#define _PIPEA_GMCH_DATA_N 0x70054 +#define _PIPEB_GMCH_DATA_N 0x71054 + +/* + * Computing Link M and N values for the Display Port link + * + * Link M / N = pixel_clock / ls_clk + * + * (the DP spec calls pixel_clock the 'strm_clk') + * + * The Link value is transmitted in the Main Stream + * Attributes and VB-ID. + */ + +#define _PIPEA_DP_LINK_M 0x70060 +#define _PIPEB_DP_LINK_M 0x71060 + +#define _PIPEA_DP_LINK_N 0x70064 +#define _PIPEB_DP_LINK_N 0x71064 + +#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) +#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) +#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) +#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) + +/* Display & cursor control */ + +/* Pipe A */ +#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) +#define DSL_LINEMASK_GEN2 0x00000fff +#define DSL_LINEMASK_GEN3 0x00001fff +#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) +#define PIPECONF_ENABLE (1<<31) +#define PIPECONF_DISABLE 0 +#define PIPECONF_DOUBLE_WIDE (1<<30) +#define I965_PIPECONF_ACTIVE (1<<30) +#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) +#define PIPECONF_SINGLE_WIDE 0 +#define PIPECONF_PIPE_UNLOCKED 0 +#define PIPECONF_PIPE_LOCKED (1<<25) +#define PIPECONF_PALETTE 0 +#define PIPECONF_GAMMA (1<<24) +#define PIPECONF_FORCE_BORDER (1<<25) +#define PIPECONF_INTERLACE_MASK (7 << 21) +#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) +/* Note that pre-gen3 does not support interlaced display directly. Panel + * fitting must be disabled on pre-ilk for interlaced. */ +#define PIPECONF_PROGRESSIVE (0 << 21) +#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ +#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ +#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) +#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ +/* Ironlake and later have a complete new set of values for interlaced. PFIT + * means panel fitter required, PF means progressive fetch, DBL means power + * saving pixel doubling. 
*/ +#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) +#define PIPECONF_INTERLACED_ILK (3 << 21) +#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ +#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ +#define PIPECONF_CXSR_DOWNCLOCK (1<<16) +#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) +#define PIPECONF_BPC_MASK (0x7 << 5) +#define PIPECONF_8BPC (0<<5) +#define PIPECONF_10BPC (1<<5) +#define PIPECONF_6BPC (2<<5) +#define PIPECONF_12BPC (3<<5) +#define PIPECONF_DITHER_EN (1<<4) +#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) +#define PIPECONF_DITHER_TYPE_SP (0<<2) +#define PIPECONF_DITHER_TYPE_ST1 (1<<2) +#define PIPECONF_DITHER_TYPE_ST2 (2<<2) +#define PIPECONF_DITHER_TYPE_TEMP (3<<2) +#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) +#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) +#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) +#define PIPE_CRC_ERROR_ENABLE (1UL<<29) +#define PIPE_CRC_DONE_ENABLE (1UL<<28) +#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) +#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) +#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) +#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) +#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) +#define PIPE_DPST_EVENT_ENABLE (1UL<<23) +#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) +#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) +#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) +#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) +#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ +#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) +#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) +#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) +#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) +#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) +#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) +#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) +#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) +#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) +#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) +#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) +#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) +#define PIPE_DPST_EVENT_STATUS (1UL<<7) +#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) +#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) +#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) +#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ +#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) +#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) + +#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) +#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) +#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) +#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) +#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) +#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) + +#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) +#define PIPEB_LINE_COMPARE_INT_EN (1<<29) +#define PIPEB_HLINE_INT_EN (1<<28) +#define PIPEB_VBLANK_INT_EN (1<<27) +#define SPRITED_FLIPDONE_INT_EN (1<<26) +#define SPRITEC_FLIPDONE_INT_EN (1<<25) +#define PLANEB_FLIPDONE_INT_EN (1<<24) +#define PIPEA_LINE_COMPARE_INT_EN (1<<21) +#define PIPEA_HLINE_INT_EN (1<<20) +#define PIPEA_VBLANK_INT_EN (1<<19) +#define SPRITEB_FLIPDONE_INT_EN (1<<18) +#define SPRITEA_FLIPDONE_INT_EN (1<<17) +#define PLANEA_FLIPDONE_INT_EN (1<<16) + +#define DPINVGTT (VLV_DISPLAY_BASE + 
0x7002c) /* VLV only */ +#define CURSORB_INVALID_GTT_INT_EN (1<<23) +#define CURSORA_INVALID_GTT_INT_EN (1<<22) +#define SPRITED_INVALID_GTT_INT_EN (1<<21) +#define SPRITEC_INVALID_GTT_INT_EN (1<<20) +#define PLANEB_INVALID_GTT_INT_EN (1<<19) +#define SPRITEB_INVALID_GTT_INT_EN (1<<18) +#define SPRITEA_INVALID_GTT_INT_EN (1<<17) +#define PLANEA_INVALID_GTT_INT_EN (1<<16) +#define DPINVGTT_EN_MASK 0xff0000 +#define CURSORB_INVALID_GTT_STATUS (1<<7) +#define CURSORA_INVALID_GTT_STATUS (1<<6) +#define SPRITED_INVALID_GTT_STATUS (1<<5) +#define SPRITEC_INVALID_GTT_STATUS (1<<4) +#define PLANEB_INVALID_GTT_STATUS (1<<3) +#define SPRITEB_INVALID_GTT_STATUS (1<<2) +#define SPRITEA_INVALID_GTT_STATUS (1<<1) +#define PLANEA_INVALID_GTT_STATUS (1<<0) +#define DPINVGTT_STATUS_MASK 0xff + +#define DSPARB 0x70030 +#define DSPARB_CSTART_MASK (0x7f << 7) +#define DSPARB_CSTART_SHIFT 7 +#define DSPARB_BSTART_MASK (0x7f) +#define DSPARB_BSTART_SHIFT 0 +#define DSPARB_BEND_SHIFT 9 /* on 855 */ +#define DSPARB_AEND_SHIFT 0 + +#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) +#define DSPFW_SR_SHIFT 23 +#define DSPFW_SR_MASK (0x1ff<<23) +#define DSPFW_CURSORB_SHIFT 16 +#define DSPFW_CURSORB_MASK (0x3f<<16) +#define DSPFW_PLANEB_SHIFT 8 +#define DSPFW_PLANEB_MASK (0x7f<<8) +#define DSPFW_PLANEA_MASK (0x7f) +#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) +#define DSPFW_CURSORA_MASK 0x00003f00 +#define DSPFW_CURSORA_SHIFT 8 +#define DSPFW_PLANEC_MASK (0x7f) +#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) +#define DSPFW_HPLL_SR_EN (1<<31) +#define DSPFW_CURSOR_SR_SHIFT 24 +#define PINEVIEW_SELF_REFRESH_EN (1<<30) +#define DSPFW_CURSOR_SR_MASK (0x3f<<24) +#define DSPFW_HPLL_CURSOR_SHIFT 16 +#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) +#define DSPFW_HPLL_SR_MASK (0x1ff) +#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070) +#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c) + +/* drain latency register values*/ +#define DRAIN_LATENCY_PRECISION_32 32 +#define DRAIN_LATENCY_PRECISION_16 16 +#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) +#define DDL_CURSORA_PRECISION_32 (1<<31) +#define DDL_CURSORA_PRECISION_16 (0<<31) +#define DDL_CURSORA_SHIFT 24 +#define DDL_PLANEA_PRECISION_32 (1<<7) +#define DDL_PLANEA_PRECISION_16 (0<<7) +#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) +#define DDL_CURSORB_PRECISION_32 (1<<31) +#define DDL_CURSORB_PRECISION_16 (0<<31) +#define DDL_CURSORB_SHIFT 24 +#define DDL_PLANEB_PRECISION_32 (1<<7) +#define DDL_PLANEB_PRECISION_16 (0<<7) + +/* FIFO watermark sizes etc */ +#define G4X_FIFO_LINE_SIZE 64 +#define I915_FIFO_LINE_SIZE 64 +#define I830_FIFO_LINE_SIZE 32 + +#define VALLEYVIEW_FIFO_SIZE 255 +#define G4X_FIFO_SIZE 127 +#define I965_FIFO_SIZE 512 +#define I945_FIFO_SIZE 127 +#define I915_FIFO_SIZE 95 +#define I855GM_FIFO_SIZE 127 /* In cachelines */ +#define I830_FIFO_SIZE 95 + +#define VALLEYVIEW_MAX_WM 0xff +#define G4X_MAX_WM 0x3f +#define I915_MAX_WM 0x3f + +#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */ +#define PINEVIEW_FIFO_LINE_SIZE 64 +#define PINEVIEW_MAX_WM 0x1ff +#define PINEVIEW_DFT_WM 0x3f +#define PINEVIEW_DFT_HPLLOFF_WM 0 +#define PINEVIEW_GUARD_WM 10 +#define PINEVIEW_CURSOR_FIFO 64 +#define PINEVIEW_CURSOR_MAX_WM 0x3f +#define PINEVIEW_CURSOR_DFT_WM 0 +#define PINEVIEW_CURSOR_GUARD_WM 5 + +#define VALLEYVIEW_CURSOR_MAX_WM 64 +#define I965_CURSOR_FIFO 64 +#define I965_CURSOR_MAX_WM 32 +#define I965_CURSOR_DFT_WM 8 + +/* define the Watermark register on Ironlake */ +#define WM0_PIPEA_ILK 0x45100 
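+
+/*
+ * Worked example for the GMCH data M/N and link M/N values described above
+ * (illustrative numbers, with the dot clock and ls_clk expressed in the same
+ * units): a 148500 kHz dot clock at 24bpp (3 bytes per pixel) over 4 lanes
+ * at the 2.7 GHz link rate (270000 kHz symbol clock) gives
+ *
+ *	data M/N = 148500 * 3 / (270000 * 4) = 445500 / 1080000
+ *	link M/N = 148500 / 270000
+ *
+ * The M and N register pairs simply carry such a numerator/denominator,
+ * reduced so that both values fit in the 24-bit DATA_LINK_M_N_MASK fields.
+ */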
+#define WM0_PIPE_PLANE_MASK (0x7f<<16) +#define WM0_PIPE_PLANE_SHIFT 16 +#define WM0_PIPE_SPRITE_MASK (0x3f<<8) +#define WM0_PIPE_SPRITE_SHIFT 8 +#define WM0_PIPE_CURSOR_MASK (0x1f) + +#define WM0_PIPEB_ILK 0x45104 +#define WM0_PIPEC_IVB 0x45200 +#define WM1_LP_ILK 0x45108 +#define WM1_LP_SR_EN (1<<31) +#define WM1_LP_LATENCY_SHIFT 24 +#define WM1_LP_LATENCY_MASK (0x7f<<24) +#define WM1_LP_FBC_MASK (0xf<<20) +#define WM1_LP_FBC_SHIFT 20 +#define WM1_LP_SR_MASK (0x1ff<<8) +#define WM1_LP_SR_SHIFT 8 +#define WM1_LP_CURSOR_MASK (0x3f) +#define WM2_LP_ILK 0x4510c +#define WM2_LP_EN (1<<31) +#define WM3_LP_ILK 0x45110 +#define WM3_LP_EN (1<<31) +#define WM1S_LP_ILK 0x45120 +#define WM2S_LP_IVB 0x45124 +#define WM3S_LP_IVB 0x45128 +#define WM1S_LP_EN (1<<31) + +/* Memory latency timer register */ +#define MLTR_ILK 0x11222 +#define MLTR_WM1_SHIFT 0 +#define MLTR_WM2_SHIFT 8 +/* the unit of memory self-refresh latency time is 0.5us */ +#define ILK_SRLT_MASK 0x3f +#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) +#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) +#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) + +/* define the fifo size on Ironlake */ +#define ILK_DISPLAY_FIFO 128 +#define ILK_DISPLAY_MAXWM 64 +#define ILK_DISPLAY_DFTWM 8 +#define ILK_CURSOR_FIFO 32 +#define ILK_CURSOR_MAXWM 16 +#define ILK_CURSOR_DFTWM 8 + +#define ILK_DISPLAY_SR_FIFO 512 +#define ILK_DISPLAY_MAX_SRWM 0x1ff +#define ILK_DISPLAY_DFT_SRWM 0x3f +#define ILK_CURSOR_SR_FIFO 64 +#define ILK_CURSOR_MAX_SRWM 0x3f +#define ILK_CURSOR_DFT_SRWM 8 + +#define ILK_FIFO_LINE_SIZE 64 + +/* define the WM info on Sandybridge */ +#define SNB_DISPLAY_FIFO 128 +#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ +#define SNB_DISPLAY_DFTWM 8 +#define SNB_CURSOR_FIFO 32 +#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ +#define SNB_CURSOR_DFTWM 8 + +#define SNB_DISPLAY_SR_FIFO 512 +#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ +#define SNB_DISPLAY_DFT_SRWM 0x3f +#define SNB_CURSOR_SR_FIFO 64 +#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ +#define SNB_CURSOR_DFT_SRWM 8 + +#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ + +#define SNB_FIFO_LINE_SIZE 64 + + +/* the address where we get all kinds of latency value */ +#define SSKPD 0x5d10 +#define SSKPD_WM_MASK 0x3f +#define SSKPD_WM0_SHIFT 0 +#define SSKPD_WM1_SHIFT 8 +#define SSKPD_WM2_SHIFT 16 +#define SSKPD_WM3_SHIFT 24 + +#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) +#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) +#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) +#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) +#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) + +/* + * The two pipe frame counter registers are not synchronized, so + * reading a stable value is somewhat tricky. 
The following code + * should work: + * + * do { + * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> + * PIPE_FRAME_HIGH_SHIFT; + * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> + * PIPE_FRAME_LOW_SHIFT); + * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> + * PIPE_FRAME_HIGH_SHIFT); + * } while (high1 != high2); + * frame = (high1 << 8) | low1; + */ +#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) +#define PIPE_FRAME_HIGH_MASK 0x0000ffff +#define PIPE_FRAME_HIGH_SHIFT 0 +#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) +#define PIPE_FRAME_LOW_MASK 0xff000000 +#define PIPE_FRAME_LOW_SHIFT 24 +#define PIPE_PIXEL_MASK 0x00ffffff +#define PIPE_PIXEL_SHIFT 0 +/* GM45+ just has to be different */ +#define _PIPEA_FRMCOUNT_GM45 0x70040 +#define _PIPEA_FLIPCOUNT_GM45 0x70044 +#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) + +/* Cursor A & B regs */ +#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) +/* Old style CUR*CNTR flags (desktop 8xx) */ +#define CURSOR_ENABLE 0x80000000 +#define CURSOR_GAMMA_ENABLE 0x40000000 +#define CURSOR_STRIDE_MASK 0x30000000 +#define CURSOR_PIPE_CSC_ENABLE (1<<24) +#define CURSOR_FORMAT_SHIFT 24 +#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) +#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) +/* New style CUR*CNTR flags */ +#define CURSOR_MODE 0x27 +#define CURSOR_MODE_DISABLE 0x00 +#define CURSOR_MODE_64_32B_AX 0x07 +#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) +#define MCURSOR_PIPE_SELECT (1 << 28) +#define MCURSOR_PIPE_A 0x00 +#define MCURSOR_PIPE_B (1 << 28) +#define MCURSOR_GAMMA_ENABLE (1 << 26) +#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) +#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) +#define CURSOR_POS_MASK 0x007FF +#define CURSOR_POS_SIGN 0x8000 +#define CURSOR_X_SHIFT 0 +#define CURSOR_Y_SHIFT 16 +#define CURSIZE 0x700a0 +#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) +#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) +#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) + +#define _CURBCNTR_IVB 0x71080 +#define _CURBBASE_IVB 0x71084 +#define _CURBPOS_IVB 0x71088 + +#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) +#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) +#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) + +#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) +#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) +#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) + +/* Display A control */ +#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) +#define DISPLAY_PLANE_ENABLE (1<<31) +#define DISPLAY_PLANE_DISABLE 0 +#define DISPPLANE_GAMMA_ENABLE (1<<30) +#define DISPPLANE_GAMMA_DISABLE 0 +#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) +#define DISPPLANE_YUV422 (0x0<<26) +#define DISPPLANE_8BPP (0x2<<26) +#define DISPPLANE_BGRA555 (0x3<<26) +#define DISPPLANE_BGRX555 (0x4<<26) +#define DISPPLANE_BGRX565 (0x5<<26) +#define DISPPLANE_BGRX888 (0x6<<26) +#define DISPPLANE_BGRA888 (0x7<<26) +#define DISPPLANE_RGBX101010 (0x8<<26) +#define DISPPLANE_RGBA101010 (0x9<<26) +#define DISPPLANE_BGRX101010 (0xa<<26) 
+#define DISPPLANE_RGBX161616 (0xc<<26) +#define DISPPLANE_RGBX888 (0xe<<26) +#define DISPPLANE_RGBA888 (0xf<<26) +#define DISPPLANE_STEREO_ENABLE (1<<25) +#define DISPPLANE_STEREO_DISABLE 0 +#define DISPPLANE_PIPE_CSC_ENABLE (1<<24) +#define DISPPLANE_SEL_PIPE_SHIFT 24 +#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) +#define DISPPLANE_SEL_PIPE_A 0 +#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) +#define DISPPLANE_SRC_KEY_ENABLE (1<<22) +#define DISPPLANE_SRC_KEY_DISABLE 0 +#define DISPPLANE_LINE_DOUBLE (1<<20) +#define DISPPLANE_NO_LINE_DOUBLE 0 +#define DISPPLANE_STEREO_POLARITY_FIRST 0 +#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) +#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ +#define DISPPLANE_TILED (1<<10) +#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) +#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) +#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ +#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) +#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ +#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ +#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ +#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) + +#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) +#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) +#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) +#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) +#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) +#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) +#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) +#define DSPLINOFF(plane) DSPADDR(plane) +#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) +#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) + +/* Display/Sprite base address macros */ +#define DISP_BASEADDR_MASK (0xfffff000) +#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) +#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) +#define I915_MODIFY_DISPBASE(reg, gfx_addr) \ + (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) + +/* VBIOS flags */ +#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) +#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) +#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) +#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) +#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) +#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) +#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) +#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) +#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) +#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) +#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) +#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) +#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) + +/* Pipe B */ +#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) +#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) +#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) +#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) +#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) +#define _PIPEB_FRMCOUNT_GM45 0x71040 +#define _PIPEB_FLIPCOUNT_GM45 0x71044 + + +/* Display B control */ +#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) +#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) +#define DISPPLANE_ALPHA_TRANS_DISABLE 0 +#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 +#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) +#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) +#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) +#define _DSPBPOS (dev_priv->info->display_mmio_offset +
0x7118C) +#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) +#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) +#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) +#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) +#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) + +/* Sprite A control */ +#define _DVSACNTR 0x72180 +#define DVS_ENABLE (1<<31) +#define DVS_GAMMA_ENABLE (1<<30) +#define DVS_PIXFORMAT_MASK (3<<25) +#define DVS_FORMAT_YUV422 (0<<25) +#define DVS_FORMAT_RGBX101010 (1<<25) +#define DVS_FORMAT_RGBX888 (2<<25) +#define DVS_FORMAT_RGBX161616 (3<<25) +#define DVS_PIPE_CSC_ENABLE (1<<24) +#define DVS_SOURCE_KEY (1<<22) +#define DVS_RGB_ORDER_XBGR (1<<20) +#define DVS_YUV_BYTE_ORDER_MASK (3<<16) +#define DVS_YUV_ORDER_YUYV (0<<16) +#define DVS_YUV_ORDER_UYVY (1<<16) +#define DVS_YUV_ORDER_YVYU (2<<16) +#define DVS_YUV_ORDER_VYUY (3<<16) +#define DVS_DEST_KEY (1<<2) +#define DVS_TRICKLE_FEED_DISABLE (1<<14) +#define DVS_TILED (1<<10) +#define _DVSALINOFF 0x72184 +#define _DVSASTRIDE 0x72188 +#define _DVSAPOS 0x7218c +#define _DVSASIZE 0x72190 +#define _DVSAKEYVAL 0x72194 +#define _DVSAKEYMSK 0x72198 +#define _DVSASURF 0x7219c +#define _DVSAKEYMAXVAL 0x721a0 +#define _DVSATILEOFF 0x721a4 +#define _DVSASURFLIVE 0x721ac +#define _DVSASCALE 0x72204 +#define DVS_SCALE_ENABLE (1<<31) +#define DVS_FILTER_MASK (3<<29) +#define DVS_FILTER_MEDIUM (0<<29) +#define DVS_FILTER_ENHANCING (1<<29) +#define DVS_FILTER_SOFTENING (2<<29) +#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ +#define DVS_VERTICAL_OFFSET_ENABLE (1<<27) +#define _DVSAGAMC 0x72300 + +#define _DVSBCNTR 0x73180 +#define _DVSBLINOFF 0x73184 +#define _DVSBSTRIDE 0x73188 +#define _DVSBPOS 0x7318c +#define _DVSBSIZE 0x73190 +#define _DVSBKEYVAL 0x73194 +#define _DVSBKEYMSK 0x73198 +#define _DVSBSURF 0x7319c +#define _DVSBKEYMAXVAL 0x731a0 +#define _DVSBTILEOFF 0x731a4 +#define _DVSBSURFLIVE 0x731ac +#define _DVSBSCALE 0x73204 +#define _DVSBGAMC 0x73300 + +#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR) +#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) +#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE) +#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS) +#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF) +#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL) +#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE) +#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE) +#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) +#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) +#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) +#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) + +#define _SPRA_CTL 0x70280 +#define SPRITE_ENABLE (1<<31) +#define SPRITE_GAMMA_ENABLE (1<<30) +#define SPRITE_PIXFORMAT_MASK (7<<25) +#define SPRITE_FORMAT_YUV422 (0<<25) +#define SPRITE_FORMAT_RGBX101010 (1<<25) +#define SPRITE_FORMAT_RGBX888 (2<<25) +#define SPRITE_FORMAT_RGBX161616 (3<<25) +#define SPRITE_FORMAT_YUV444 (4<<25) +#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ +#define SPRITE_PIPE_CSC_ENABLE (1<<24) +#define SPRITE_SOURCE_KEY (1<<22) +#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ +#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) +#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */ +#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16) +#define SPRITE_YUV_ORDER_YUYV (0<<16) +#define 
SPRITE_YUV_ORDER_UYVY (1<<16) +#define SPRITE_YUV_ORDER_YVYU (2<<16) +#define SPRITE_YUV_ORDER_VYUY (3<<16) +#define SPRITE_TRICKLE_FEED_DISABLE (1<<14) +#define SPRITE_INT_GAMMA_ENABLE (1<<13) +#define SPRITE_TILED (1<<10) +#define SPRITE_DEST_KEY (1<<2) +#define _SPRA_LINOFF 0x70284 +#define _SPRA_STRIDE 0x70288 +#define _SPRA_POS 0x7028c +#define _SPRA_SIZE 0x70290 +#define _SPRA_KEYVAL 0x70294 +#define _SPRA_KEYMSK 0x70298 +#define _SPRA_SURF 0x7029c +#define _SPRA_KEYMAX 0x702a0 +#define _SPRA_TILEOFF 0x702a4 +#define _SPRA_OFFSET 0x702a4 +#define _SPRA_SURFLIVE 0x702ac +#define _SPRA_SCALE 0x70304 +#define SPRITE_SCALE_ENABLE (1<<31) +#define SPRITE_FILTER_MASK (3<<29) +#define SPRITE_FILTER_MEDIUM (0<<29) +#define SPRITE_FILTER_ENHANCING (1<<29) +#define SPRITE_FILTER_SOFTENING (2<<29) +#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ +#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27) +#define _SPRA_GAMC 0x70400 + +#define _SPRB_CTL 0x71280 +#define _SPRB_LINOFF 0x71284 +#define _SPRB_STRIDE 0x71288 +#define _SPRB_POS 0x7128c +#define _SPRB_SIZE 0x71290 +#define _SPRB_KEYVAL 0x71294 +#define _SPRB_KEYMSK 0x71298 +#define _SPRB_SURF 0x7129c +#define _SPRB_KEYMAX 0x712a0 +#define _SPRB_TILEOFF 0x712a4 +#define _SPRB_OFFSET 0x712a4 +#define _SPRB_SURFLIVE 0x712ac +#define _SPRB_SCALE 0x71304 +#define _SPRB_GAMC 0x71400 + +#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL) +#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) +#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE) +#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS) +#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE) +#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL) +#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK) +#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) +#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) +#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) +#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) +#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) +#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) +#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) + +#define _SPACNTR 0x72180 +#define SP_ENABLE (1<<31) +#define SP_GEAMMA_ENABLE (1<<30) +#define SP_PIXFORMAT_MASK (0xf<<26) +#define SP_FORMAT_YUV422 (0<<26) +#define SP_FORMAT_BGR565 (5<<26) +#define SP_FORMAT_BGRX8888 (6<<26) +#define SP_FORMAT_BGRA8888 (7<<26) +#define SP_FORMAT_RGBX1010102 (8<<26) +#define SP_FORMAT_RGBA1010102 (9<<26) +#define SP_FORMAT_RGBX8888 (0xe<<26) +#define SP_FORMAT_RGBA8888 (0xf<<26) +#define SP_SOURCE_KEY (1<<22) +#define SP_YUV_BYTE_ORDER_MASK (3<<16) +#define SP_YUV_ORDER_YUYV (0<<16) +#define SP_YUV_ORDER_UYVY (1<<16) +#define SP_YUV_ORDER_YVYU (2<<16) +#define SP_YUV_ORDER_VYUY (3<<16) +#define SP_TILED (1<<10) +#define _SPALINOFF 0x72184 +#define _SPASTRIDE 0x72188 +#define _SPAPOS 0x7218c +#define _SPASIZE 0x72190 +#define _SPAKEYMINVAL 0x72194 +#define _SPAKEYMSK 0x72198 +#define _SPASURF 0x7219c +#define _SPAKEYMAXVAL 0x721a0 +#define _SPATILEOFF 0x721a4 +#define _SPACONSTALPHA 0x721a8 +#define _SPAGAMC 0x721f4 + +#define _SPBCNTR 0x72280 +#define _SPBLINOFF 0x72284 +#define _SPBSTRIDE 0x72288 +#define _SPBPOS 0x7228c +#define _SPBSIZE 0x72290 +#define _SPBKEYMINVAL 0x72294 +#define _SPBKEYMSK 0x72298 +#define _SPBSURF 0x7229c +#define _SPBKEYMAXVAL 0x722a0 +#define _SPBTILEOFF 0x722a4 +#define _SPBCONSTALPHA 0x722a8 +#define _SPBGAMC 0x722f4 + 
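+
+/*
+ * The (pipe, plane) macros below linearize the two VLV sprites per pipe as
+ * (pipe * 2 + plane).  Assuming the usual _PIPE(pipe, a, b) helper defined
+ * earlier in this file ((a) + (pipe) * ((b) - (a))), consecutive indices
+ * step by _SPBCNTR - _SPACNTR = 0x100, for example:
+ *
+ *	SPCNTR(0, 0) == 0x72180	(pipe A, first sprite, _SPACNTR)
+ *	SPCNTR(0, 1) == 0x72280	(pipe A, second sprite, _SPBCNTR)
+ *	SPCNTR(1, 0) == 0x72380	(pipe B, first sprite)
+ *	SPCNTR(1, 1) == 0x72480	(pipe B, second sprite)
+ */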
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) +#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) +#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE) +#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS) +#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE) +#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL) +#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK) +#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF) +#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL) +#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF) +#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) +#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) + +/* VBIOS regs */ +#define VGACNTRL 0x71400 +# define VGA_DISP_DISABLE (1 << 31) +# define VGA_2X_MODE (1 << 30) +# define VGA_PIPE_B_SELECT (1 << 29) + +#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) + +/* Ironlake */ + +#define CPU_VGACNTRL 0x41000 + +#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030 +#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4) +#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2) +#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2) +#define DIGITAL_PORTA_NO_DETECT (0 << 0) +#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1) +#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0) + +/* refresh rate hardware control */ +#define RR_HW_CTL 0x45300 +#define RR_HW_LOW_POWER_FRAMES_MASK 0xff +#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 + +#define FDI_PLL_BIOS_0 0x46000 +#define FDI_PLL_FB_CLOCK_MASK 0xff +#define FDI_PLL_BIOS_1 0x46004 +#define FDI_PLL_BIOS_2 0x46008 +#define DISPLAY_PORT_PLL_BIOS_0 0x4600c +#define DISPLAY_PORT_PLL_BIOS_1 0x46010 +#define DISPLAY_PORT_PLL_BIOS_2 0x46014 + +#define PCH_3DCGDIS0 0x46020 +# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) +# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) + +#define PCH_3DCGDIS1 0x46024 +# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) + +#define FDI_PLL_FREQ_CTL 0x46030 +#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) +#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 +#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff + + +#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) +#define PIPE_DATA_M1_OFFSET 0 +#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) +#define PIPE_DATA_N1_OFFSET 0 + +#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) +#define PIPE_DATA_M2_OFFSET 0 +#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) +#define PIPE_DATA_N2_OFFSET 0 + +#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) +#define PIPE_LINK_M1_OFFSET 0 +#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) +#define PIPE_LINK_N1_OFFSET 0 + +#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) +#define PIPE_LINK_M2_OFFSET 0 +#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) +#define PIPE_LINK_N2_OFFSET 0 + +/* PIPEB timing regs are same start from 0x61000 */ + +#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) +#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) + +#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) 
+#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) + +#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) +#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) + +#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) +#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) + +#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) +#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) +#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) +#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) +#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) +#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) +#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) +#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) + +/* CPU panel fitter */ +/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ +#define _PFA_CTL_1 0x68080 +#define _PFB_CTL_1 0x68880 +#define PF_ENABLE (1<<31) +#define PF_PIPE_SEL_MASK_IVB (3<<29) +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) +#define PF_FILTER_MASK (3<<23) +#define PF_FILTER_PROGRAMMED (0<<23) +#define PF_FILTER_MED_3x3 (1<<23) +#define PF_FILTER_EDGE_ENHANCE (2<<23) +#define PF_FILTER_EDGE_SOFTEN (3<<23) +#define _PFA_WIN_SZ 0x68074 +#define _PFB_WIN_SZ 0x68874 +#define _PFA_WIN_POS 0x68070 +#define _PFB_WIN_POS 0x68870 +#define _PFA_VSCALE 0x68084 +#define _PFB_VSCALE 0x68884 +#define _PFA_HSCALE 0x68090 +#define _PFB_HSCALE 0x68890 + +#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) +#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) +#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) +#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) +#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) + +/* legacy palette */ +#define _LGC_PALETTE_A 0x4a000 +#define _LGC_PALETTE_B 0x4a800 +#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + +/* interrupts */ +#define DE_MASTER_IRQ_CONTROL (1 << 31) +#define DE_SPRITEB_FLIP_DONE (1 << 29) +#define DE_SPRITEA_FLIP_DONE (1 << 28) +#define DE_PLANEB_FLIP_DONE (1 << 27) +#define DE_PLANEA_FLIP_DONE (1 << 26) +#define DE_PCU_EVENT (1 << 25) +#define DE_GTT_FAULT (1 << 24) +#define DE_POISON (1 << 23) +#define DE_PERFORM_COUNTER (1 << 22) +#define DE_PCH_EVENT (1 << 21) +#define DE_AUX_CHANNEL_A (1 << 20) +#define DE_DP_A_HOTPLUG (1 << 19) +#define DE_GSE (1 << 18) +#define DE_PIPEB_VBLANK (1 << 15) +#define DE_PIPEB_EVEN_FIELD (1 << 14) +#define DE_PIPEB_ODD_FIELD (1 << 13) +#define DE_PIPEB_LINE_COMPARE (1 << 12) +#define DE_PIPEB_VSYNC (1 << 11) +#define DE_PIPEB_FIFO_UNDERRUN (1 << 8) +#define DE_PIPEA_VBLANK (1 << 7) +#define DE_PIPEA_EVEN_FIELD (1 << 6) +#define DE_PIPEA_ODD_FIELD (1 << 5) +#define DE_PIPEA_LINE_COMPARE (1 << 4) +#define DE_PIPEA_VSYNC (1 << 3) +#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) + +/* More Ivybridge lolz */ +#define DE_ERR_DEBUG_IVB (1<<30) +#define DE_GSE_IVB (1<<29) +#define DE_PCH_EVENT_IVB (1<<28) +#define DE_DP_A_HOTPLUG_IVB (1<<27) +#define DE_AUX_CHANNEL_A_IVB (1<<26) +#define DE_SPRITEC_FLIP_DONE_IVB (1<<14) +#define DE_PLANEC_FLIP_DONE_IVB (1<<13) +#define DE_PIPEC_VBLANK_IVB (1<<10) +#define DE_SPRITEB_FLIP_DONE_IVB (1<<9) +#define DE_PLANEB_FLIP_DONE_IVB (1<<8) +#define DE_PIPEB_VBLANK_IVB (1<<5) +#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3) +#define DE_PIPEA_VBLANK_IVB (1<<0) + +#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ +#define MASTER_INTERRUPT_ENABLE (1<<31) + +#define DEISR 0x44000 +#define DEIMR 0x44004 +#define DEIIR 0x44008 +#define DEIER 0x4400c + +/* GT interrupt. + * Note that for gen6+ the ring-specific interrupt bits do alias with the + * corresponding bits in the per-ring interrupt control registers. */ +#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26) +#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25) +#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22) +#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15) +#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) +#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */ +#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5) +#define GT_PIPE_NOTIFY (1 << 4) +#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3) +#define GT_SYNC_STATUS (1 << 2) +#define GT_USER_INTERRUPT (1 << 0) + +#define GTISR 0x44010 +#define GTIMR 0x44014 +#define GTIIR 0x44018 +#define GTIER 0x4401c + +#define ILK_DISPLAY_CHICKEN2 0x42004 +/* Required on all Ironlake and Sandybridge according to the B-Spec. */ +#define ILK_ELPIN_409_SELECT (1 << 25) +#define ILK_DPARB_GATE (1<<22) +#define ILK_VSDPFD_FULL (1<<21) +#define ILK_DISPLAY_CHICKEN_FUSES 0x42014 +#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31) +#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30) +#define ILK_DISPLAY_DEBUG_DISABLE (1<<29) +#define ILK_HDCP_DISABLE (1<<25) +#define ILK_eDP_A_DISABLE (1<<24) +#define ILK_DESKTOP (1<<23) + +#define ILK_DSPCLK_GATE_D 0x42020 +#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) +#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) +#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) +#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) +#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) + +#define IVB_CHICKEN3 0x4200c +# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) +# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) + +#define DISP_ARB_CTL 0x45000 +#define DISP_TILE_SURFACE_SWIZZLING (1<<13) +#define DISP_FBC_WM_DIS (1<<15) +#define GEN7_MSG_CTL 0x45010 +#define WAIT_FOR_PCH_RESET_ACK (1<<1) +#define WAIT_FOR_PCH_FLR_ACK (1<<0) + +/* GEN7 chicken */ +#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 +# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) + +#define GEN7_L3CNTLREG1 0xB01C +#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C +#define GEN7_L3AGDIS (1<<19) + +#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 +#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 + +#define GEN7_L3SQCREG4 0xb034 +#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) + +/* WaCatErrorRejectionIssue */ +#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 +#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) + +#define HSW_FUSE_STRAP 0x42014 +#define HSW_CDCLK_LIMIT (1 << 24) + +/* PCH */ + +/* south display engine interrupt: IBX */ +#define SDE_AUDIO_POWER_D (1 << 27) +#define SDE_AUDIO_POWER_C (1 << 26) +#define SDE_AUDIO_POWER_B (1 << 25) +#define SDE_AUDIO_POWER_SHIFT (25) +#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) +#define SDE_GMBUS (1 << 24) +#define SDE_AUDIO_HDCP_TRANSB (1 << 23) +#define SDE_AUDIO_HDCP_TRANSA (1 << 22) +#define SDE_AUDIO_HDCP_MASK (3 << 22) +#define SDE_AUDIO_TRANSB (1 << 21) +#define SDE_AUDIO_TRANSA (1 << 20) +#define SDE_AUDIO_TRANS_MASK (3 << 20) +#define SDE_POISON (1 << 19) +/* 18 reserved */ +#define SDE_FDI_RXB (1 << 17) +#define SDE_FDI_RXA (1 << 16) +#define SDE_FDI_MASK (3 << 16) +#define SDE_AUXD (1 << 15) +#define SDE_AUXC (1 << 14) +#define SDE_AUXB (1 << 13) +#define SDE_AUX_MASK 
(7 << 13) +/* 12 reserved */ +#define SDE_CRT_HOTPLUG (1 << 11) +#define SDE_PORTD_HOTPLUG (1 << 10) +#define SDE_PORTC_HOTPLUG (1 << 9) +#define SDE_PORTB_HOTPLUG (1 << 8) +#define SDE_SDVOB_HOTPLUG (1 << 6) +#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \ + SDE_SDVOB_HOTPLUG | \ + SDE_PORTB_HOTPLUG | \ + SDE_PORTC_HOTPLUG | \ + SDE_PORTD_HOTPLUG) +#define SDE_TRANSB_CRC_DONE (1 << 5) +#define SDE_TRANSB_CRC_ERR (1 << 4) +#define SDE_TRANSB_FIFO_UNDER (1 << 3) +#define SDE_TRANSA_CRC_DONE (1 << 2) +#define SDE_TRANSA_CRC_ERR (1 << 1) +#define SDE_TRANSA_FIFO_UNDER (1 << 0) +#define SDE_TRANS_MASK (0x3f) + +/* south display engine interrupt: CPT/PPT */ +#define SDE_AUDIO_POWER_D_CPT (1 << 31) +#define SDE_AUDIO_POWER_C_CPT (1 << 30) +#define SDE_AUDIO_POWER_B_CPT (1 << 29) +#define SDE_AUDIO_POWER_SHIFT_CPT 29 +#define SDE_AUDIO_POWER_MASK_CPT (7 << 29) +#define SDE_AUXD_CPT (1 << 27) +#define SDE_AUXC_CPT (1 << 26) +#define SDE_AUXB_CPT (1 << 25) +#define SDE_AUX_MASK_CPT (7 << 25) +#define SDE_PORTD_HOTPLUG_CPT (1 << 23) +#define SDE_PORTC_HOTPLUG_CPT (1 << 22) +#define SDE_PORTB_HOTPLUG_CPT (1 << 21) +#define SDE_CRT_HOTPLUG_CPT (1 << 19) +#define SDE_SDVOB_HOTPLUG_CPT (1 << 18) +#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ + SDE_SDVOB_HOTPLUG_CPT | \ + SDE_PORTD_HOTPLUG_CPT | \ + SDE_PORTC_HOTPLUG_CPT | \ + SDE_PORTB_HOTPLUG_CPT) +#define SDE_GMBUS_CPT (1 << 17) +#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) +#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) +#define SDE_FDI_RXC_CPT (1 << 8) +#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) +#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) +#define SDE_FDI_RXB_CPT (1 << 4) +#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) +#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) +#define SDE_FDI_RXA_CPT (1 << 0) +#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ + SDE_AUDIO_CP_REQ_B_CPT | \ + SDE_AUDIO_CP_REQ_A_CPT) +#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ + SDE_AUDIO_CP_CHG_B_CPT | \ + SDE_AUDIO_CP_CHG_A_CPT) +#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ + SDE_FDI_RXB_CPT | \ + SDE_FDI_RXA_CPT) + +#define SDEISR 0xc4000 +#define SDEIMR 0xc4004 +#define SDEIIR 0xc4008 +#define SDEIER 0xc400c + +/* digital port hotplug */ +#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ +#define PORTD_HOTPLUG_ENABLE (1 << 20) +#define PORTD_PULSE_DURATION_2ms (0) +#define PORTD_PULSE_DURATION_4_5ms (1 << 18) +#define PORTD_PULSE_DURATION_6ms (2 << 18) +#define PORTD_PULSE_DURATION_100ms (3 << 18) +#define PORTD_PULSE_DURATION_MASK (3 << 18) +#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) +#define PORTD_HOTPLUG_NO_DETECT (0 << 16) +#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) +#define PORTD_HOTPLUG_LONG_DETECT (2 << 16) +#define PORTC_HOTPLUG_ENABLE (1 << 12) +#define PORTC_PULSE_DURATION_2ms (0) +#define PORTC_PULSE_DURATION_4_5ms (1 << 10) +#define PORTC_PULSE_DURATION_6ms (2 << 10) +#define PORTC_PULSE_DURATION_100ms (3 << 10) +#define PORTC_PULSE_DURATION_MASK (3 << 10) +#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) +#define PORTC_HOTPLUG_NO_DETECT (0 << 8) +#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) +#define PORTC_HOTPLUG_LONG_DETECT (2 << 8) +#define PORTB_HOTPLUG_ENABLE (1 << 4) +#define PORTB_PULSE_DURATION_2ms (0) +#define PORTB_PULSE_DURATION_4_5ms (1 << 2) +#define PORTB_PULSE_DURATION_6ms (2 << 2) +#define PORTB_PULSE_DURATION_100ms (3 << 2) +#define PORTB_PULSE_DURATION_MASK (3 << 2) +#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) +#define PORTB_HOTPLUG_NO_DETECT (0 << 0) +#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) +#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) + 
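For orientation, a minimal sketch (not from this patch) of how an interrupt handler might consume SHOTPLUG_CTL using the fields above: read the register, classify the pulse per port, and write the value back (the detect status bits are assumed here to be sticky and cleared by that write-back). I915_READ()/I915_WRITE() are the driver's usual MMIO accessors:

u32 hotplug = I915_READ(PCH_PORT_HOTPLUG);

switch (hotplug & PORTB_HOTPLUG_STATUS_MASK) {
case PORTB_HOTPLUG_LONG_DETECT:
	/* long pulse: something was plugged or unplugged, schedule full detect */
	break;
case PORTB_HOTPLUG_SHORT_DETECT:
	/* short pulse: sink IRQ, e.g. a DisplayPort link status change */
	break;
case PORTB_HOTPLUG_NO_DETECT:
	/* nothing pending on port B */
	break;
}

/* write the value back to clear the sticky detect status bits */
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);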
+#define PCH_GPIOA 0xc5010 +#define PCH_GPIOB 0xc5014 +#define PCH_GPIOC 0xc5018 +#define PCH_GPIOD 0xc501c +#define PCH_GPIOE 0xc5020 +#define PCH_GPIOF 0xc5024 + +#define PCH_GMBUS0 0xc5100 +#define PCH_GMBUS1 0xc5104 +#define PCH_GMBUS2 0xc5108 +#define PCH_GMBUS3 0xc510c +#define PCH_GMBUS4 0xc5110 +#define PCH_GMBUS5 0xc5120 + +#define _PCH_DPLL_A 0xc6014 +#define _PCH_DPLL_B 0xc6018 +#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) + +#define _PCH_FPA0 0xc6040 +#define FP_CB_TUNE (0x3<<22) +#define _PCH_FPA1 0xc6044 +#define _PCH_FPB0 0xc6048 +#define _PCH_FPB1 0xc604c +#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) +#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) + +#define PCH_DPLL_TEST 0xc606c + +#define PCH_DREF_CONTROL 0xC6200 +#define DREF_CONTROL_MASK 0x7fc3 +#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13) +#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13) +#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13) +#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) +#define DREF_SSC_SOURCE_DISABLE (0<<11) +#define DREF_SSC_SOURCE_ENABLE (2<<11) +#define DREF_SSC_SOURCE_MASK (3<<11) +#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) +#define DREF_NONSPREAD_CK505_ENABLE (1<<9) +#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) +#define DREF_NONSPREAD_SOURCE_MASK (3<<9) +#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) +#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) +#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7) +#define DREF_SSC4_DOWNSPREAD (0<<6) +#define DREF_SSC4_CENTERSPREAD (1<<6) +#define DREF_SSC1_DISABLE (0<<1) +#define DREF_SSC1_ENABLE (1<<1) +#define DREF_SSC4_DISABLE (0) +#define DREF_SSC4_ENABLE (1) + +#define PCH_RAWCLK_FREQ 0xc6204 +#define FDL_TP1_TIMER_SHIFT 12 +#define FDL_TP1_TIMER_MASK (3<<12) +#define FDL_TP2_TIMER_SHIFT 10 +#define FDL_TP2_TIMER_MASK (3<<10) +#define RAWCLK_FREQ_MASK 0x3ff + +#define PCH_DPLL_TMR_CFG 0xc6208 + +#define PCH_SSC4_PARMS 0xc6210 +#define PCH_SSC4_AUX_PARMS 0xc6214 + +#define PCH_DPLL_SEL 0xc7000 +#define TRANSA_DPLL_ENABLE (1<<3) +#define TRANSA_DPLLB_SEL (1<<0) +#define TRANSA_DPLLA_SEL 0 +#define TRANSB_DPLL_ENABLE (1<<7) +#define TRANSB_DPLLB_SEL (1<<4) +#define TRANSB_DPLLA_SEL (0) +#define TRANSC_DPLL_ENABLE (1<<11) +#define TRANSC_DPLLB_SEL (1<<8) +#define TRANSC_DPLLA_SEL (0) + +/* transcoder */ + +#define _TRANS_HTOTAL_A 0xe0000 +#define TRANS_HTOTAL_SHIFT 16 +#define TRANS_HACTIVE_SHIFT 0 +#define _TRANS_HBLANK_A 0xe0004 +#define TRANS_HBLANK_END_SHIFT 16 +#define TRANS_HBLANK_START_SHIFT 0 +#define _TRANS_HSYNC_A 0xe0008 +#define TRANS_HSYNC_END_SHIFT 16 +#define TRANS_HSYNC_START_SHIFT 0 +#define _TRANS_VTOTAL_A 0xe000c +#define TRANS_VTOTAL_SHIFT 16 +#define TRANS_VACTIVE_SHIFT 0 +#define _TRANS_VBLANK_A 0xe0010 +#define TRANS_VBLANK_END_SHIFT 16 +#define TRANS_VBLANK_START_SHIFT 0 +#define _TRANS_VSYNC_A 0xe0014 +#define TRANS_VSYNC_END_SHIFT 16 +#define TRANS_VSYNC_START_SHIFT 0 +#define _TRANS_VSYNCSHIFT_A 0xe0028 + +#define _TRANSA_DATA_M1 0xe0030 +#define _TRANSA_DATA_N1 0xe0034 +#define _TRANSA_DATA_M2 0xe0038 +#define _TRANSA_DATA_N2 0xe003c +#define _TRANSA_DP_LINK_M1 0xe0040 +#define _TRANSA_DP_LINK_N1 0xe0044 +#define _TRANSA_DP_LINK_M2 0xe0048 +#define _TRANSA_DP_LINK_N2 0xe004c + +/* Per-transcoder DIP controls */ + +#define _VIDEO_DIP_CTL_A 0xe0200 +#define _VIDEO_DIP_DATA_A 0xe0208 +#define _VIDEO_DIP_GCP_A 0xe0210 + +#define _VIDEO_DIP_CTL_B 0xe1200 +#define _VIDEO_DIP_DATA_B 0xe1208 +#define _VIDEO_DIP_GCP_B 0xe1210 + +#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) 
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) +#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) + +#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) +#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) +#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) + +#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) +#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) +#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) + +#define VLV_TVIDEO_DIP_CTL(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) +#define VLV_TVIDEO_DIP_DATA(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B) +#define VLV_TVIDEO_DIP_GCP(pipe) \ + _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) + +/* Haswell DIP controls */ +#define HSW_VIDEO_DIP_CTL_A 0x60200 +#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220 +#define HSW_VIDEO_DIP_VS_DATA_A 0x60260 +#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 +#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 +#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320 +#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240 +#define HSW_VIDEO_DIP_VS_ECC_A 0x60280 +#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 +#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300 +#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344 +#define HSW_VIDEO_DIP_GCP_A 0x60210 + +#define HSW_VIDEO_DIP_CTL_B 0x61200 +#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220 +#define HSW_VIDEO_DIP_VS_DATA_B 0x61260 +#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 +#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 +#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240 +#define HSW_VIDEO_DIP_VS_ECC_B 0x61280 +#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 +#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300 +#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344 +#define HSW_VIDEO_DIP_GCP_B 0x61210 + +#define HSW_TVIDEO_DIP_CTL(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) +#define HSW_TVIDEO_DIP_AVI_DATA(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) +#define HSW_TVIDEO_DIP_SPD_DATA(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) +#define HSW_TVIDEO_DIP_GCP(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) +#define HSW_TVIDEO_DIP_VSC_DATA(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) + +#define _TRANS_HTOTAL_B 0xe1000 +#define _TRANS_HBLANK_B 0xe1004 +#define _TRANS_HSYNC_B 0xe1008 +#define _TRANS_VTOTAL_B 0xe100c +#define _TRANS_VBLANK_B 0xe1010 +#define _TRANS_VSYNC_B 0xe1014 +#define _TRANS_VSYNCSHIFT_B 0xe1028 + +#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) +#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) +#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B) +#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) +#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) +#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) +#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \ + _TRANS_VSYNCSHIFT_B) + +#define _TRANSB_DATA_M1 0xe1030 +#define _TRANSB_DATA_N1 0xe1034 +#define _TRANSB_DATA_M2 0xe1038 +#define _TRANSB_DATA_N2 0xe103c +#define _TRANSB_DP_LINK_M1 0xe1040 +#define _TRANSB_DP_LINK_N1 0xe1044 +#define _TRANSB_DP_LINK_M2 0xe1048 +#define _TRANSB_DP_LINK_N2 0xe104c + +#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1) +#define TRANSDATA_N1(pipe) _PIPE(pipe, 
_TRANSA_DATA_N1, _TRANSB_DATA_N1) +#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2) +#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2) +#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1) +#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1) +#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2) +#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2) + +#define _TRANSACONF 0xf0008 +#define _TRANSBCONF 0xf1008 +#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF) +#define TRANS_DISABLE (0<<31) +#define TRANS_ENABLE (1<<31) +#define TRANS_STATE_MASK (1<<30) +#define TRANS_STATE_DISABLE (0<<30) +#define TRANS_STATE_ENABLE (1<<30) +#define TRANS_FSYNC_DELAY_HB1 (0<<27) +#define TRANS_FSYNC_DELAY_HB2 (1<<27) +#define TRANS_FSYNC_DELAY_HB3 (2<<27) +#define TRANS_FSYNC_DELAY_HB4 (3<<27) +#define TRANS_INTERLACE_MASK (7<<21) +#define TRANS_PROGRESSIVE (0<<21) +#define TRANS_INTERLACED (3<<21) +#define TRANS_LEGACY_INTERLACED_ILK (2<<21) +#define TRANS_8BPC (0<<5) +#define TRANS_10BPC (1<<5) +#define TRANS_6BPC (2<<5) +#define TRANS_12BPC (3<<5) + +#define _TRANSA_CHICKEN1 0xf0060 +#define _TRANSB_CHICKEN1 0xf1060 +#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) +#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) +#define _TRANSA_CHICKEN2 0xf0064 +#define _TRANSB_CHICKEN2 0xf1064 +#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) +#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) +#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) +#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27) +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26) +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25) + +#define SOUTH_CHICKEN1 0xc2000 +#define FDIA_PHASE_SYNC_SHIFT_OVR 19 +#define FDIA_PHASE_SYNC_SHIFT_EN 18 +#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) +#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) +#define FDI_BC_BIFURCATION_SELECT (1 << 12) +#define SOUTH_CHICKEN2 0xc2004 +#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) +#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) +#define DPLS_EDP_PPS_FIX_DIS (1<<0) + +#define _FDI_RXA_CHICKEN 0xc200c +#define _FDI_RXB_CHICKEN 0xc2010 +#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) +#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) +#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) + +#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) +#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) + +/* CPU: FDI_TX */ +#define _FDI_TXA_CTL 0x60100 +#define _FDI_TXB_CTL 0x61100 +#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) +#define FDI_TX_DISABLE (0<<31) +#define FDI_TX_ENABLE (1<<31) +#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) +#define FDI_LINK_TRAIN_PATTERN_2 (1<<28) +#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28) +#define FDI_LINK_TRAIN_NONE (3<<28) +#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25) +#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25) +#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25) +#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) +/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. + SNB has different settings. 
*/ +/* SNB A-stepping */ +#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +/* SNB B-stepping */ +#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) +#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) +#define FDI_DP_PORT_WIDTH_X1 (0<<19) +#define FDI_DP_PORT_WIDTH_X2 (1<<19) +#define FDI_DP_PORT_WIDTH_X3 (2<<19) +#define FDI_DP_PORT_WIDTH_X4 (3<<19) +#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) +/* Ironlake: hardwired to 1 */ +#define FDI_TX_PLL_ENABLE (1<<14) + +/* Ivybridge has different bits for lolz */ +#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8) +#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8) +#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8) +#define FDI_LINK_TRAIN_NONE_IVB (3<<8) + +/* both Tx and Rx */ +#define FDI_COMPOSITE_SYNC (1<<11) +#define FDI_LINK_TRAIN_AUTO (1<<10) +#define FDI_SCRAMBLING_ENABLE (0<<7) +#define FDI_SCRAMBLING_DISABLE (1<<7) + +/* FDI_RX, FDI_X is hard-wired to Transcoder_X */ +#define _FDI_RXA_CTL 0xf000c +#define _FDI_RXB_CTL 0xf100c +#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) +#define FDI_RX_ENABLE (1<<31) +/* train, dp width same as FDI_TX */ +#define FDI_FS_ERRC_ENABLE (1<<27) +#define FDI_FE_ERRC_ENABLE (1<<26) +#define FDI_DP_PORT_WIDTH_X8 (7<<19) +#define FDI_RX_POLARITY_REVERSED_LPT (1<<16) +#define FDI_8BPC (0<<16) +#define FDI_10BPC (1<<16) +#define FDI_6BPC (2<<16) +#define FDI_12BPC (3<<16) +#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15) +#define FDI_DMI_LINK_REVERSE_MASK (1<<14) +#define FDI_RX_PLL_ENABLE (1<<13) +#define FDI_FS_ERR_CORRECT_ENABLE (1<<11) +#define FDI_FE_ERR_CORRECT_ENABLE (1<<10) +#define FDI_FS_ERR_REPORT_ENABLE (1<<9) +#define FDI_FE_ERR_REPORT_ENABLE (1<<8) +#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) +#define FDI_PCDCLK (1<<4) +/* CPT */ +#define FDI_AUTO_TRAINING (1<<10) +#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) +#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) +#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) +#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) +#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) +/* LPT */ +#define FDI_PORT_WIDTH_2X_LPT (1<<19) +#define FDI_PORT_WIDTH_1X_LPT (0<<19) + +#define _FDI_RXA_MISC 0xf0010 +#define _FDI_RXB_MISC 0xf1010 +#define FDI_RX_PWRDN_LANE1_MASK (3<<26) +#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) +#define FDI_RX_PWRDN_LANE0_MASK (3<<24) +#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) +#define FDI_RX_TP1_TO_TP2_48 (2<<20) +#define FDI_RX_TP1_TO_TP2_64 (3<<20) +#define FDI_RX_FDI_DELAY_90 (0x90<<0) +#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) + +#define _FDI_RXA_TUSIZE1 0xf0030 +#define _FDI_RXA_TUSIZE2 0xf0038 +#define _FDI_RXB_TUSIZE1 0xf1030 +#define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) +#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) + +/* FDI_RX interrupt register format */ +#define FDI_RX_INTER_LANE_ALIGN (1<<10) +#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */ +#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */ +#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7) +#define FDI_RX_FS_CODE_ERR (1<<6) +#define FDI_RX_FE_CODE_ERR (1<<5) +#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4) +#define FDI_RX_HDCP_LINK_FAIL (1<<3) +#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2) +#define 
FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) +#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) + +#define _FDI_RXA_IIR 0xf0014 +#define _FDI_RXA_IMR 0xf0018 +#define _FDI_RXB_IIR 0xf1014 +#define _FDI_RXB_IMR 0xf1018 +#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) +#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) + +#define FDI_PLL_CTL_1 0xfe000 +#define FDI_PLL_CTL_2 0xfe004 + +#define PCH_LVDS 0xe1180 +#define LVDS_DETECTED (1 << 1) + +/* vlv has 2 sets of panel control regs. */ +#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) +#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) +#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) +#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) +#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) + +#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) +#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) +#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) +#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) +#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) + +#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS) +#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL) +#define VLV_PIPE_PP_ON_DELAYS(pipe) \ + _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS) +#define VLV_PIPE_PP_OFF_DELAYS(pipe) \ + _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS) +#define VLV_PIPE_PP_DIVISOR(pipe) \ + _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR) + +#define PCH_PP_STATUS 0xc7200 +#define PCH_PP_CONTROL 0xc7204 +#define PANEL_UNLOCK_REGS (0xabcd << 16) +#define PANEL_UNLOCK_MASK (0xffff << 16) +#define EDP_FORCE_VDD (1 << 3) +#define EDP_BLC_ENABLE (1 << 2) +#define PANEL_POWER_RESET (1 << 1) +#define PANEL_POWER_OFF (0 << 0) +#define PANEL_POWER_ON (1 << 0) +#define PCH_PP_ON_DELAYS 0xc7208 +#define PANEL_PORT_SELECT_MASK (3 << 30) +#define PANEL_PORT_SELECT_LVDS (0 << 30) +#define PANEL_PORT_SELECT_DPA (1 << 30) +#define EDP_PANEL (1 << 30) +#define PANEL_PORT_SELECT_DPC (2 << 30) +#define PANEL_PORT_SELECT_DPD (3 << 30) +#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) +#define PANEL_POWER_UP_DELAY_SHIFT 16 +#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) +#define PANEL_LIGHT_ON_DELAY_SHIFT 0 + +#define PCH_PP_OFF_DELAYS 0xc720c +#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) +#define PANEL_POWER_PORT_LVDS (0 << 30) +#define PANEL_POWER_PORT_DP_A (1 << 30) +#define PANEL_POWER_PORT_DP_C (2 << 30) +#define PANEL_POWER_PORT_DP_D (3 << 30) +#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) +#define PANEL_POWER_DOWN_DELAY_SHIFT 16 +#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) +#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 + +#define PCH_PP_DIVISOR 0xc7210 +#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) +#define PP_REFERENCE_DIVIDER_SHIFT 8 +#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) +#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 + +#define PCH_DP_B 0xe4100 +#define PCH_DPB_AUX_CH_CTL 0xe4110 +#define PCH_DPB_AUX_CH_DATA1 0xe4114 +#define PCH_DPB_AUX_CH_DATA2 0xe4118 +#define PCH_DPB_AUX_CH_DATA3 0xe411c +#define PCH_DPB_AUX_CH_DATA4 0xe4120 +#define PCH_DPB_AUX_CH_DATA5 0xe4124 + +#define PCH_DP_C 0xe4200 +#define PCH_DPC_AUX_CH_CTL 0xe4210 +#define PCH_DPC_AUX_CH_DATA1 0xe4214 +#define PCH_DPC_AUX_CH_DATA2 0xe4218 +#define PCH_DPC_AUX_CH_DATA3 0xe421c +#define PCH_DPC_AUX_CH_DATA4 0xe4220 +#define PCH_DPC_AUX_CH_DATA5 0xe4224 + +#define PCH_DP_D 0xe4300 +#define PCH_DPD_AUX_CH_CTL 0xe4310 +#define PCH_DPD_AUX_CH_DATA1 0xe4314 +#define PCH_DPD_AUX_CH_DATA2 0xe4318 +#define 
PCH_DPD_AUX_CH_DATA3 0xe431c +#define PCH_DPD_AUX_CH_DATA4 0xe4320 +#define PCH_DPD_AUX_CH_DATA5 0xe4324 + +/* CPT */ +#define PORT_TRANS_A_SEL_CPT 0 +#define PORT_TRANS_B_SEL_CPT (1<<29) +#define PORT_TRANS_C_SEL_CPT (2<<29) +#define PORT_TRANS_SEL_MASK (3<<29) +#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) +#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) +#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) + +#define TRANS_DP_CTL_A 0xe0300 +#define TRANS_DP_CTL_B 0xe1300 +#define TRANS_DP_CTL_C 0xe2300 +#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) +#define TRANS_DP_OUTPUT_ENABLE (1<<31) +#define TRANS_DP_PORT_SEL_B (0<<29) +#define TRANS_DP_PORT_SEL_C (1<<29) +#define TRANS_DP_PORT_SEL_D (2<<29) +#define TRANS_DP_PORT_SEL_NONE (3<<29) +#define TRANS_DP_PORT_SEL_MASK (3<<29) +#define TRANS_DP_AUDIO_ONLY (1<<26) +#define TRANS_DP_ENH_FRAMING (1<<18) +#define TRANS_DP_8BPC (0<<9) +#define TRANS_DP_10BPC (1<<9) +#define TRANS_DP_6BPC (2<<9) +#define TRANS_DP_12BPC (3<<9) +#define TRANS_DP_BPC_MASK (3<<9) +#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) +#define TRANS_DP_VSYNC_ACTIVE_LOW 0 +#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) +#define TRANS_DP_HSYNC_ACTIVE_LOW 0 +#define TRANS_DP_SYNC_MASK (3<<3) + +/* SNB eDP training params */ +/* SNB A-stepping */ +#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) +#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) +#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) +#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +/* SNB B-stepping */ +#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) +#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) +#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) +#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) +#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) +#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) + +/* IVB */ +#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22) +#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22) +#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22) +#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) +#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) +#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) +#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) + +/* legacy values */ +#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) +#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22) +#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22) +#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22) +#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22) + +#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) + +#define FORCEWAKE 0xA18C +#define FORCEWAKE_VLV 0x1300b0 +#define FORCEWAKE_ACK_VLV 0x1300b4 +#define FORCEWAKE_MEDIA_VLV 0x1300b8 +#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc +#define FORCEWAKE_ACK_HSW 0x130044 +#define FORCEWAKE_ACK 0x130090 +#define VLV_GTLC_WAKE_CTRL 0x130090 +#define VLV_GTLC_PW_STATUS 0x130094 +#define FORCEWAKE_MT 0xa188 /* multi-threaded */ +#define FORCEWAKE_KERNEL 0x1 +#define FORCEWAKE_USER 0x2 +#define FORCEWAKE_MT_ACK 0x130040 +#define ECOBUS 0xa180 +#define FORCEWAKE_MT_ENABLE (1<<5) + +#define GTFIFODBG 0x120000 +#define GT_FIFO_CPU_ERROR_MASK 7 +#define GT_FIFO_OVFERR (1<<2) +#define GT_FIFO_IAWRERR (1<<1) +#define GT_FIFO_IARDERR (1<<0) + +#define GT_FIFO_FREE_ENTRIES 0x120008 +#define GT_FIFO_NUM_RESERVED_ENTRIES 20 + +#define GEN6_UCGCTL1 0x9400 +# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) +# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) + +#define GEN6_UCGCTL2 0x9404 +# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) +# 
define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) +# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) +# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) +# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) + +#define GEN7_UCGCTL4 0x940c +#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) + +#define GEN6_RPNSWREQ 0xA008 +#define GEN6_TURBO_DISABLE (1<<31) +#define GEN6_FREQUENCY(x) ((x)<<25) +#define HSW_FREQUENCY(x) ((x)<<24) +#define GEN6_OFFSET(x) ((x)<<19) +#define GEN6_AGGRESSIVE_TURBO (0<<15) +#define GEN6_RC_VIDEO_FREQ 0xA00C +#define GEN6_RC_CONTROL 0xA090 +#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) +#define GEN6_RC_CTL_RC6p_ENABLE (1<<17) +#define GEN6_RC_CTL_RC6_ENABLE (1<<18) +#define GEN6_RC_CTL_RC1e_ENABLE (1<<20) +#define GEN6_RC_CTL_RC7_ENABLE (1<<22) +#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) +#define GEN6_RC_CTL_HW_ENABLE (1<<31) +#define GEN6_RP_DOWN_TIMEOUT 0xA010 +#define GEN6_RP_INTERRUPT_LIMITS 0xA014 +#define GEN6_RPSTAT1 0xA01C +#define GEN6_CAGF_SHIFT 8 +#define HSW_CAGF_SHIFT 7 +#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) +#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) +#define GEN6_RP_CONTROL 0xA024 +#define GEN6_RP_MEDIA_TURBO (1<<11) +#define GEN6_RP_MEDIA_MODE_MASK (3<<9) +#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) +#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9) +#define GEN6_RP_MEDIA_HW_MODE (1<<9) +#define GEN6_RP_MEDIA_SW_MODE (0<<9) +#define GEN6_RP_MEDIA_IS_GFX (1<<8) +#define GEN6_RP_ENABLE (1<<7) +#define GEN6_RP_UP_IDLE_MIN (0x1<<3) +#define GEN6_RP_UP_BUSY_AVG (0x2<<3) +#define GEN6_RP_UP_BUSY_CONT (0x4<<3) +#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) +#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) +#define GEN6_RP_UP_THRESHOLD 0xA02C +#define GEN6_RP_DOWN_THRESHOLD 0xA030 +#define GEN6_RP_CUR_UP_EI 0xA050 +#define GEN6_CURICONT_MASK 0xffffff +#define GEN6_RP_CUR_UP 0xA054 +#define GEN6_CURBSYTAVG_MASK 0xffffff +#define GEN6_RP_PREV_UP 0xA058 +#define GEN6_RP_CUR_DOWN_EI 0xA05C +#define GEN6_CURIAVG_MASK 0xffffff +#define GEN6_RP_CUR_DOWN 0xA060 +#define GEN6_RP_PREV_DOWN 0xA064 +#define GEN6_RP_UP_EI 0xA068 +#define GEN6_RP_DOWN_EI 0xA06C +#define GEN6_RP_IDLE_HYSTERSIS 0xA070 +#define GEN6_RC_STATE 0xA094 +#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 +#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C +#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 +#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 +#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC +#define GEN6_RC_SLEEP 0xA0B0 +#define GEN6_RC1e_THRESHOLD 0xA0B4 +#define GEN6_RC6_THRESHOLD 0xA0B8 +#define GEN6_RC6p_THRESHOLD 0xA0BC +#define GEN6_RC6pp_THRESHOLD 0xA0C0 +#define GEN6_PMINTRMSK 0xA168 + +#define GEN6_PMISR 0x44020 +#define GEN6_PMIMR 0x44024 /* rps_lock */ +#define GEN6_PMIIR 0x44028 +#define GEN6_PMIER 0x4402C +#define GEN6_PM_MBOX_EVENT (1<<25) +#define GEN6_PM_THERMAL_EVENT (1<<24) +#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) +#define GEN6_PM_RP_UP_THRESHOLD (1<<5) +#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) +#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) +#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) +#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ + GEN6_PM_RP_DOWN_THRESHOLD | \ + GEN6_PM_RP_DOWN_TIMEOUT) + +#define GEN6_GT_GFX_RC6_LOCKED 0x138104 +#define GEN6_GT_GFX_RC6 0x138108 +#define GEN6_GT_GFX_RC6p 0x13810C +#define GEN6_GT_GFX_RC6pp 0x138110 + +#define GEN6_PCODE_MAILBOX 0x138124 +#define GEN6_PCODE_READY (1<<31) +#define GEN6_READ_OC_PARAMS 0xc +#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 +#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 +#define GEN6_PCODE_WRITE_RC6VIDS 0x4 +#define GEN6_PCODE_READ_RC6VIDS 0x5 +#define 
GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) +#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) +#define GEN6_PCODE_DATA 0x138128 +#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 +#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 + +#define VLV_IOSF_DOORBELL_REQ 0x182100 +#define IOSF_DEVFN_SHIFT 24 +#define IOSF_OPCODE_SHIFT 16 +#define IOSF_PORT_SHIFT 8 +#define IOSF_BYTE_ENABLES_SHIFT 4 +#define IOSF_BAR_SHIFT 1 +#define IOSF_SB_BUSY (1<<0) +#define IOSF_PORT_PUNIT 0x4 +#define VLV_IOSF_DATA 0x182104 +#define VLV_IOSF_ADDR 0x182108 + +#define PUNIT_OPCODE_REG_READ 6 +#define PUNIT_OPCODE_REG_WRITE 7 + +#define GEN6_GT_CORE_STATUS 0x138060 +#define GEN6_CORE_CPD_STATE_MASK (7<<4) +#define GEN6_RCn_MASK 7 +#define GEN6_RC0 0 +#define GEN6_RC3 2 +#define GEN6_RC6 3 +#define GEN6_RC7 4 + +#define GEN7_MISCCPCTL (0x9424) +#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) + +/* IVYBRIDGE DPF */ +#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ +#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) +#define GEN7_PARITY_ERROR_VALID (1<<13) +#define GEN7_L3CDERRST1_BANK_MASK (3<<11) +#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8) +#define GEN7_PARITY_ERROR_ROW(reg) \ + ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) +#define GEN7_PARITY_ERROR_BANK(reg) \ + ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) +#define GEN7_PARITY_ERROR_SUBBANK(reg) \ + ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) +#define GEN7_L3CDERRST1_ENABLE (1<<7) + +#define GEN7_L3LOG_BASE 0xB070 +#define GEN7_L3LOG_SIZE 0x80 + +#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ +#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 +#define GEN7_MAX_PS_THREAD_DEP (8<<12) +#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) + +#define GEN7_ROW_CHICKEN2 0xe4f4 +#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 +#define DOP_CLOCK_GATING_DISABLE (1<<0) + +#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) +#define INTEL_AUDIO_DEVCL 0x808629FB +#define INTEL_AUDIO_DEVBLC 0x80862801 +#define INTEL_AUDIO_DEVCTG 0x80862802 + +#define G4X_AUD_CNTL_ST 0x620B4 +#define G4X_ELDV_DEVCL_DEVBLC (1 << 13) +#define G4X_ELDV_DEVCTG (1 << 14) +#define G4X_ELD_ADDR (0xf << 5) +#define G4X_ELD_ACK (1 << 4) +#define G4X_HDMIW_HDMIEDID 0x6210C + +#define IBX_HDMIW_HDMIEDID_A 0xE2050 +#define IBX_HDMIW_HDMIEDID_B 0xE2150 +#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ + IBX_HDMIW_HDMIEDID_A, \ + IBX_HDMIW_HDMIEDID_B) +#define IBX_AUD_CNTL_ST_A 0xE20B4 +#define IBX_AUD_CNTL_ST_B 0xE21B4 +#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ + IBX_AUD_CNTL_ST_A, \ + IBX_AUD_CNTL_ST_B) +#define IBX_ELD_BUFFER_SIZE (0x1f << 10) +#define IBX_ELD_ADDRESS (0x1f << 5) +#define IBX_ELD_ACK (1 << 4) +#define IBX_AUD_CNTL_ST2 0xE20C0 +#define IBX_ELD_VALIDB (1 << 0) +#define IBX_CP_READYB (1 << 1) + +#define CPT_HDMIW_HDMIEDID_A 0xE5050 +#define CPT_HDMIW_HDMIEDID_B 0xE5150 +#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ + CPT_HDMIW_HDMIEDID_A, \ + CPT_HDMIW_HDMIEDID_B) +#define CPT_AUD_CNTL_ST_A 0xE50B4 +#define CPT_AUD_CNTL_ST_B 0xE51B4 +#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ + CPT_AUD_CNTL_ST_A, \ + CPT_AUD_CNTL_ST_B) +#define CPT_AUD_CNTRL_ST2 0xE50C0 + +/* These are the 4 32-bit write offset registers for each stream + * output buffer. It determines the offset from the + * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 
+ */ +#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) + +#define IBX_AUD_CONFIG_A 0xe2000 +#define IBX_AUD_CONFIG_B 0xe2100 +#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ + IBX_AUD_CONFIG_A, \ + IBX_AUD_CONFIG_B) +#define CPT_AUD_CONFIG_A 0xe5000 +#define CPT_AUD_CONFIG_B 0xe5100 +#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ + CPT_AUD_CONFIG_A, \ + CPT_AUD_CONFIG_B) +#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) +#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) +#define AUD_CONFIG_UPPER_N_SHIFT 20 +#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) +#define AUD_CONFIG_LOWER_N_SHIFT 4 +#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 +#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) +#define AUD_CONFIG_DISABLE_NCTS (1 << 3) + +/* HSW Audio */ +#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ +#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ +#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ + HSW_AUD_CONFIG_A, \ + HSW_AUD_CONFIG_B) + +#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ +#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ +#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ + HSW_AUD_MISC_CTRL_A, \ + HSW_AUD_MISC_CTRL_B) + +#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ +#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ +#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ + HSW_AUD_DIP_ELD_CTRL_ST_A, \ + HSW_AUD_DIP_ELD_CTRL_ST_B) + +/* Audio Digital Converter */ +#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ +#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ +#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ + HSW_AUD_DIG_CNVT_1, \ + HSW_AUD_DIG_CNVT_2) +#define DIP_PORT_SEL_MASK 0x3 + +#define HSW_AUD_EDID_DATA_A 0x65050 +#define HSW_AUD_EDID_DATA_B 0x65150 +#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ + HSW_AUD_EDID_DATA_A, \ + HSW_AUD_EDID_DATA_B) + +#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ +#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ +#define AUDIO_INACTIVE_C (1<<11) +#define AUDIO_INACTIVE_B (1<<7) +#define AUDIO_INACTIVE_A (1<<3) +#define AUDIO_OUTPUT_ENABLE_A (1<<2) +#define AUDIO_OUTPUT_ENABLE_B (1<<6) +#define AUDIO_OUTPUT_ENABLE_C (1<<10) +#define AUDIO_ELD_VALID_A (1<<0) +#define AUDIO_ELD_VALID_B (1<<4) +#define AUDIO_ELD_VALID_C (1<<8) +#define AUDIO_CP_READY_A (1<<1) +#define AUDIO_CP_READY_B (1<<5) +#define AUDIO_CP_READY_C (1<<9) + +/* HSW Power Wells */ +#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ +#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ +#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ +#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ +#define HSW_PWR_WELL_ENABLE (1<<31) +#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_CTL5 0x45410 +#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) +#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) +#define HSW_PWR_WELL_FORCE_ON (1<<19) +#define HSW_PWR_WELL_CTL6 0x45414 + +/* Per-pipe DDI Function Control */ +#define TRANS_DDI_FUNC_CTL_A 0x60400 +#define TRANS_DDI_FUNC_CTL_B 0x61400 +#define TRANS_DDI_FUNC_CTL_C 0x62400 +#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 +#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ + TRANS_DDI_FUNC_CTL_B) +#define TRANS_DDI_FUNC_ENABLE (1<<31) +/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ +#define TRANS_DDI_PORT_MASK (7<<28) +#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) +#define 
TRANS_DDI_PORT_NONE (0<<28) +#define TRANS_DDI_MODE_SELECT_MASK (7<<24) +#define TRANS_DDI_MODE_SELECT_HDMI (0<<24) +#define TRANS_DDI_MODE_SELECT_DVI (1<<24) +#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) +#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) +#define TRANS_DDI_MODE_SELECT_FDI (4<<24) +#define TRANS_DDI_BPC_MASK (7<<20) +#define TRANS_DDI_BPC_8 (0<<20) +#define TRANS_DDI_BPC_10 (1<<20) +#define TRANS_DDI_BPC_6 (2<<20) +#define TRANS_DDI_BPC_12 (3<<20) +#define TRANS_DDI_PVSYNC (1<<17) +#define TRANS_DDI_PHSYNC (1<<16) +#define TRANS_DDI_EDP_INPUT_MASK (7<<12) +#define TRANS_DDI_EDP_INPUT_A_ON (0<<12) +#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) +#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) +#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) +#define TRANS_DDI_BFI_ENABLE (1<<4) +#define TRANS_DDI_PORT_WIDTH_X1 (0<<1) +#define TRANS_DDI_PORT_WIDTH_X2 (1<<1) +#define TRANS_DDI_PORT_WIDTH_X4 (3<<1) + +/* DisplayPort Transport Control */ +#define DP_TP_CTL_A 0x64040 +#define DP_TP_CTL_B 0x64140 +#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) +#define DP_TP_CTL_ENABLE (1<<31) +#define DP_TP_CTL_MODE_SST (0<<27) +#define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) +#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) +#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) +#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) +#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) +#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) +#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) +#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) +#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) + +/* DisplayPort Transport Status */ +#define DP_TP_STATUS_A 0x64044 +#define DP_TP_STATUS_B 0x64144 +#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) +#define DP_TP_STATUS_IDLE_DONE (1<<25) +#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) + +/* DDI Buffer Control */ +#define DDI_BUF_CTL_A 0x64000 +#define DDI_BUF_CTL_B 0x64100 +#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) +#define DDI_BUF_CTL_ENABLE (1<<31) +#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ +#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ +#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ +#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ +#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ +#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ +#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ +#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ +#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ +#define DDI_BUF_EMP_MASK (0xf<<24) +#define DDI_BUF_PORT_REVERSAL (1<<16) +#define DDI_BUF_IS_IDLE (1<<7) +#define DDI_A_4_LANES (1<<4) +#define DDI_PORT_WIDTH_X1 (0<<1) +#define DDI_PORT_WIDTH_X2 (1<<1) +#define DDI_PORT_WIDTH_X4 (3<<1) +#define DDI_INIT_DISPLAY_DETECTED (1<<0) + +/* DDI Buffer Translations */ +#define DDI_BUF_TRANS_A 0x64E00 +#define DDI_BUF_TRANS_B 0x64E60 +#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + +/* Sideband Interface (SBI) is programmed indirectly, via + * SBI_ADDR, which contains the register offset; and SBI_DATA, + * which contains the payload */ +#define SBI_ADDR 0xC6000 +#define SBI_DATA 0xC6004 +#define SBI_CTL_STAT 0xC6008 +#define SBI_CTL_DEST_ICLK (0x0<<16) +#define SBI_CTL_DEST_MPHY (0x1<<16) +#define SBI_CTL_OP_IORD (0x2<<8) +#define SBI_CTL_OP_IOWR (0x3<<8) +#define SBI_CTL_OP_CRRD (0x6<<8) +#define SBI_CTL_OP_CRWR (0x7<<8) +#define SBI_RESPONSE_FAIL (0x1<<1) +#define SBI_RESPONSE_SUCCESS (0x0<<1) +#define SBI_BUSY (0x1<<0) +#define SBI_READY (0x0<<0) + +/* SBI 
offsets */ +#define SBI_SSCDIVINTPHASE6 0x0600 +#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) +#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) +#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) +#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) +#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) +#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) +#define SBI_SSCCTL 0x020c +#define SBI_SSCCTL6 0x060C +#define SBI_SSCCTL_PATHALT (1<<3) +#define SBI_SSCCTL_DISABLE (1<<0) +#define SBI_SSCAUXDIV6 0x0610 +#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) +#define SBI_DBUFF0 0x2a00 +#define SBI_DBUFF0_ENABLE (1<<0) + +/* LPT PIXCLK_GATE */ +#define PIXCLK_GATE 0xC6020 +#define PIXCLK_GATE_UNGATE (1<<0) +#define PIXCLK_GATE_GATE (0<<0) + +/* SPLL */ +#define SPLL_CTL 0x46020 +#define SPLL_PLL_ENABLE (1<<31) +#define SPLL_PLL_SSC (1<<28) +#define SPLL_PLL_NON_SSC (2<<28) +#define SPLL_PLL_FREQ_810MHz (0<<26) +#define SPLL_PLL_FREQ_1350MHz (1<<26) + +/* WRPLL */ +#define WRPLL_CTL1 0x46040 +#define WRPLL_CTL2 0x46060 +#define WRPLL_PLL_ENABLE (1<<31) +#define WRPLL_PLL_SELECT_SSC (0x01<<28) +#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) +#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) +/* WRPLL divider programming */ +#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_POST(x) ((x)<<8) +#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) + +/* Port clock selection */ +#define PORT_CLK_SEL_A 0x46100 +#define PORT_CLK_SEL_B 0x46104 +#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) +#define PORT_CLK_SEL_LCPLL_2700 (0<<29) +#define PORT_CLK_SEL_LCPLL_1350 (1<<29) +#define PORT_CLK_SEL_LCPLL_810 (2<<29) +#define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_WRPLL1 (4<<29) +#define PORT_CLK_SEL_WRPLL2 (5<<29) +#define PORT_CLK_SEL_NONE (7<<29) + +/* Transcoder clock selection */ +#define TRANS_CLK_SEL_A 0x46140 +#define TRANS_CLK_SEL_B 0x46144 +#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) +/* For each transcoder, we need to select the corresponding port clock */ +#define TRANS_CLK_SEL_DISABLED (0x0<<29) +#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) + +#define _TRANSA_MSA_MISC 0x60410 +#define _TRANSB_MSA_MISC 0x61410 +#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ + _TRANSB_MSA_MISC) +#define TRANS_MSA_SYNC_CLK (1<<0) +#define TRANS_MSA_6_BPC (0<<5) +#define TRANS_MSA_8_BPC (1<<5) +#define TRANS_MSA_10_BPC (2<<5) +#define TRANS_MSA_12_BPC (3<<5) +#define TRANS_MSA_16_BPC (4<<5) + +/* LCPLL Control */ +#define LCPLL_CTL 0x130040 +#define LCPLL_PLL_DISABLE (1<<31) +#define LCPLL_PLL_LOCK (1<<30) +#define LCPLL_CLK_FREQ_MASK (3<<26) +#define LCPLL_CLK_FREQ_450 (0<<26) +#define LCPLL_CD_CLOCK_DISABLE (1<<25) +#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) +#define LCPLL_CD_SOURCE_FCLK (1<<21) + +/* Pipe WM_LINETIME - watermark line time */ +#define PIPE_WM_LINETIME_A 0x45270 +#define PIPE_WM_LINETIME_B 0x45274 +#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ + PIPE_WM_LINETIME_B) +#define PIPE_WM_LINETIME_MASK (0x1ff) +#define PIPE_WM_LINETIME_TIME(x) ((x)) +#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) +#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) + +/* SFUSE_STRAP */ +#define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP_DDIB_DETECTED (1<<2) +#define SFUSE_STRAP_DDIC_DETECTED (1<<1) +#define SFUSE_STRAP_DDID_DETECTED (1<<0) + +#define WM_DBG 0x45280 +#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) +#define WM_DBG_DISALLOW_MAXFIFO (1<<1) +#define WM_DBG_DISALLOW_SPRITE (1<<2) + +/* pipe CSC */ +#define _PIPE_A_CSC_COEFF_RY_GY 
0x49010 +#define _PIPE_A_CSC_COEFF_BY 0x49014 +#define _PIPE_A_CSC_COEFF_RU_GU 0x49018 +#define _PIPE_A_CSC_COEFF_BU 0x4901c +#define _PIPE_A_CSC_COEFF_RV_GV 0x49020 +#define _PIPE_A_CSC_COEFF_BV 0x49024 +#define _PIPE_A_CSC_MODE 0x49028 +#define _PIPE_A_CSC_PREOFF_HI 0x49030 +#define _PIPE_A_CSC_PREOFF_ME 0x49034 +#define _PIPE_A_CSC_PREOFF_LO 0x49038 +#define _PIPE_A_CSC_POSTOFF_HI 0x49040 +#define _PIPE_A_CSC_POSTOFF_ME 0x49044 +#define _PIPE_A_CSC_POSTOFF_LO 0x49048 + +#define _PIPE_B_CSC_COEFF_RY_GY 0x49110 +#define _PIPE_B_CSC_COEFF_BY 0x49114 +#define _PIPE_B_CSC_COEFF_RU_GU 0x49118 +#define _PIPE_B_CSC_COEFF_BU 0x4911c +#define _PIPE_B_CSC_COEFF_RV_GV 0x49120 +#define _PIPE_B_CSC_COEFF_BV 0x49124 +#define _PIPE_B_CSC_MODE 0x49128 +#define _PIPE_B_CSC_PREOFF_HI 0x49130 +#define _PIPE_B_CSC_PREOFF_ME 0x49134 +#define _PIPE_B_CSC_PREOFF_LO 0x49138 +#define _PIPE_B_CSC_POSTOFF_HI 0x49140 +#define _PIPE_B_CSC_POSTOFF_ME 0x49144 +#define _PIPE_B_CSC_POSTOFF_LO 0x49148 + +#define CSC_BLACK_SCREEN_OFFSET (1 << 2) +#define CSC_POSITION_BEFORE_GAMMA (1 << 1) +#define CSC_MODE_YUV_TO_RGB (1 << 0) + +#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) +#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) +#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) +#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) +#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) +#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) +#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) +#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) +#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) +#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) +#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) +#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) +#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) + +#endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c new file mode 100644 index 0000000..369b3d8 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -0,0 +1,424 @@ +/* + * + * Copyright 2008 (c) Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drmP.h> +#include <drm/i915_drm.h> +#include "intel_drv.h" +#include "i915_reg.h" + +static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE8(index_port, reg); + return I915_READ8(data_port); +} + +static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); + return I915_READ8(VGA_AR_DATA_READ); +} + +static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); + I915_WRITE8(VGA_AR_DATA_WRITE, val); +} + +static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE8(index_port, reg); + I915_WRITE8(data_port, val); +} + +static void i915_save_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* VGA state */ + dev_priv->regfile.saveVGA0 = I915_READ(VGA0); + dev_priv->regfile.saveVGA1 = I915_READ(VGA1); + dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); + dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev)); + + /* VGA color palette registers */ + dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); + + /* MSR bits */ + dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); + if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* CRT controller regs */ + i915_write_indexed(dev, cr_index, cr_data, 0x11, + i915_read_indexed(dev, cr_index, cr_data, 0x11) & + (~0x80)); + for (i = 0; i <= 0x24; i++) + dev_priv->regfile.saveCR[i] = + i915_read_indexed(dev, cr_index, cr_data, i); + /* Make sure we don't turn off CR group 0 writes */ + dev_priv->regfile.saveCR[0x11] &= ~0x80; + + /* Attribute controller registers */ + I915_READ8(st01); + dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); + for (i = 0; i <= 0x14; i++) + dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); + I915_READ8(st01); + + /* Graphics controller registers */ + for (i = 0; i < 9; i++) + dev_priv->regfile.saveGR[i] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); + + dev_priv->regfile.saveGR[0x10] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); + dev_priv->regfile.saveGR[0x11] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); + dev_priv->regfile.saveGR[0x18] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); + + /* Sequencer registers */ + for (i = 0; i < 8; i++) + dev_priv->regfile.saveSR[i] = + i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); +} + +static void i915_restore_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + 
/* VGA state */ + I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL); + + I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); + I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); + I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); + POSTING_READ(VGA_PD); + udelay(150); + + /* MSR bits */ + I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); + if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* Sequencer registers, don't write SR07 */ + for (i = 0; i < 7; i++) + i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, + dev_priv->regfile.saveSR[i]); + + /* CRT controller regs */ + /* Enable CR group 0 writes */ + i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]); + for (i = 0; i <= 0x24; i++) + i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); + + /* Graphics controller regs */ + for (i = 0; i < 9; i++) + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, + dev_priv->regfile.saveGR[i]); + + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, + dev_priv->regfile.saveGR[0x10]); + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, + dev_priv->regfile.saveGR[0x11]); + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, + dev_priv->regfile.saveGR[0x18]); + + /* Attribute controller registers */ + I915_READ8(st01); /* switch back to index mode */ + for (i = 0; i <= 0x14; i++) + i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); + I915_READ8(st01); /* switch back to index mode */ + I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); + I915_READ8(st01); + + /* VGA color palette registers */ + I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); +} + +static void i915_save_display(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Display arbitration control */ + if (INTEL_INFO(dev)->gen <= 4) + dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); + + /* This is only meaningful in non-KMS mode */ + /* Don't regfile.save them in KMS mode */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_save_display_reg(dev); + + /* LVDS state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); + dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); + dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); + dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); + dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); + } else { + dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); + if (INTEL_INFO(dev)->gen >= 4) + dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + dev_priv->regfile.saveLVDS = I915_READ(LVDS); + } + + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) + dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); + dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); + dev_priv->regfile.savePP_DIVISOR = 
I915_READ(PCH_PP_DIVISOR); + } else { + dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); + dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); + dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); + } + + /* Only regfile.save FBC state on the platform that supports FBC */ + if (I915_HAS_FBC(dev)) { + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); + } else if (IS_GM45(dev)) { + dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); + } else { + dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); + } + } + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_save_vga(dev); +} + +static void i915_restore_display(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 mask = 0xffffffff; + + /* Display arbitration */ + if (INTEL_INFO(dev)->gen <= 4) + I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_restore_display_reg(dev); + + /* LVDS state */ + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) + I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + mask = ~LVDS_PORT_EN; + + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask); + else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) + I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask); + + if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) + I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); + + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); + I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; + * otherwise we get blank eDP screen after S3 on some machines + */ + I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); + I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); + I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); + I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); + I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); + I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); + I915_WRITE(RSTDBYCTL, + dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); + } else { + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); + I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); + I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); + I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); + I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); + I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); + } + + /* only restore FBC info on the platform that supports FBC*/ + intel_disable_fbc(dev); + if (I915_HAS_FBC(dev)) { + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + } else if (IS_GM45(dev)) { + I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + } else { + I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); + 
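+			/*
+			 * FBC_CONTROL carries the FBC enable bit, so it is
+			 * restored only after the CFB/LL base and CONTROL2
+			 * values above are back in place.
+			 */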
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); + } + } + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_restore_vga(dev); + else + i915_redisable_vga(dev); +} + +int i915_save_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); + + mutex_lock(&dev->struct_mutex); + + i915_save_display(dev); + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.saveDEIER = I915_READ(DEIER); + dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); + dev_priv->regfile.saveGTIER = I915_READ(GTIER); + dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); + dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); + dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); + dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = + I915_READ(RSTDBYCTL); + dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); + } else { + dev_priv->regfile.saveIER = I915_READ(IER); + dev_priv->regfile.saveIMR = I915_READ(IMR); + } + } + + intel_disable_gt_powersave(dev); + + /* Cache mode state */ + dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + + /* Memory Arbitration state */ + dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); + + /* Scratch space */ + for (i = 0; i < 16; i++) { + dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); + dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + } + for (i = 0; i < 3; i++) + dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int i915_restore_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); + + mutex_lock(&dev->struct_mutex); + + i915_gem_restore_fences(dev); + i915_restore_display(dev); + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); + I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); + I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); + I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); + I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); + I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); + I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); + } else { + I915_WRITE(IER, dev_priv->regfile.saveIER); + I915_WRITE(IMR, dev_priv->regfile.saveIMR); + } + } + + /* Cache mode state */ + I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); + + /* Memory arbitration state */ + I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); + + for (i = 0; i < 16; i++) { + I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); + I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); + } + for (i = 0; i < 3; i++) + I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); + + mutex_unlock(&dev->struct_mutex); + + intel_i2c_reset(dev); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c new file mode 100644 index 0000000..d5e1890 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -0,0 +1,408 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the 
Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky + * + */ + +#include +#include +#include +#include +#include "intel_drv.h" +#include "i915_drv.h" + +#ifdef CONFIG_PM +static u32 calc_residency(struct drm_device *dev, const u32 reg) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u64 raw_time; /* 32b value may overflow during fixed point math */ + + if (!intel_enable_rc6(dev)) + return 0; + + raw_time = I915_READ(reg) * 128ULL; + return DIV_ROUND_UP_ULL(raw_time, 100000); +} + +static ssize_t +show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); +} + +static ssize_t +show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); +} + +static ssize_t +show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); +} + +static ssize_t +show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); +} + +static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); +static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL); +static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL); +static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL); + +static struct attribute *rc6_attrs[] = { + &dev_attr_rc6_enable.attr, + &dev_attr_rc6_residency_ms.attr, + &dev_attr_rc6p_residency_ms.attr, + &dev_attr_rc6pp_residency_ms.attr, + NULL +}; + +static struct attribute_group rc6_attr_group = { + .name = power_group_name, + .attrs = rc6_attrs +}; +#endif + +static int l3_access_valid(struct drm_device *dev, loff_t offset) +{ + if (!HAS_L3_GPU_CACHE(dev)) + return -EPERM; + + if (offset % 4 != 0) + return -EINVAL; + + if (offset >= GEN7_L3LOG_SIZE) + return -ENXIO; + + return 0; +} + +static ssize_t +i915_l3_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct 
device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + uint32_t misccpctl; + int i, ret; + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + + for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) + *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); + + mutex_unlock(&drm_dev->struct_mutex); + + return i - offset; +} + +static ssize_t +i915_l3_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + u32 *temp = NULL; /* Just here to make handling failures easy */ + int ret; + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + if (!dev_priv->l3_parity.remap_info) { + temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); + if (!temp) { + mutex_unlock(&drm_dev->struct_mutex); + return -ENOMEM; + } + } + + ret = i915_gpu_idle(drm_dev); + if (ret) { + kfree(temp); + mutex_unlock(&drm_dev->struct_mutex); + return ret; + } + + /* TODO: Ideally we really want a GPU reset here to make sure errors + * aren't propagated. Since I cannot find a stable way to reset the GPU + * at this point it is left as a TODO. 
+ */ + if (temp) + dev_priv->l3_parity.remap_info = temp; + + memcpy(dev_priv->l3_parity.remap_info + (offset/4), + buf + (offset/4), + count); + + i915_gem_l3_remap(drm_dev); + + mutex_unlock(&drm_dev->struct_mutex); + + return count; +} + +static struct bin_attribute dpf_attrs = { + .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)}, + .size = GEN7_L3LOG_SIZE, + .read = i915_l3_read, + .write = i915_l3_write, + .mmap = NULL +}; + +static ssize_t gt_cur_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + mutex_lock(&dev_priv->rps.hw_lock); + ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev_priv->rps.hw_lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + mutex_lock(&dev_priv->rps.hw_lock); + ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev_priv->rps.hw_lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_max_freq_mhz_store(struct device *kdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; + ssize_t ret; + + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; + + val /= GT_FREQUENCY_MULTIPLIER; + + mutex_lock(&dev_priv->rps.hw_lock); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + hw_max = dev_priv->rps.hw_max; + non_oc_max = (rp_state_cap & 0xff); + hw_min = ((rp_state_cap & 0xff0000) >> 16); + + if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; + } + + if (val > non_oc_max) + DRM_DEBUG("User requested overclocking to %d\n", + val * GT_FREQUENCY_MULTIPLIER); + + if (dev_priv->rps.cur_delay > val) + gen6_set_rps(dev_priv->dev, val); + + dev_priv->rps.max_delay = val; + + mutex_unlock(&dev_priv->rps.hw_lock); + + return count; +} + +static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + mutex_lock(&dev_priv->rps.hw_lock); + ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev_priv->rps.hw_lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_min_freq_mhz_store(struct device *kdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, rp_state_cap, hw_max, hw_min; + ssize_t ret; + + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; + + val /= GT_FREQUENCY_MULTIPLIER; + + mutex_lock(&dev_priv->rps.hw_lock); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + hw_max = dev_priv->rps.hw_max; + hw_min = ((rp_state_cap & 
0xff0000) >> 16); + + if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; + } + + if (dev_priv->rps.cur_delay < val) + gen6_set_rps(dev_priv->dev, val); + + dev_priv->rps.min_delay = val; + + mutex_unlock(&dev_priv->rps.hw_lock); + + return count; + +} + +static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); +static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); +static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); + + +static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); +static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); +static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); +static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); + +/* For now we have a static number of RP states */ +static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, rp_state_cap; + ssize_t ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + mutex_unlock(&dev->struct_mutex); + + if (attr == &dev_attr_gt_RP0_freq_mhz) { + val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; + } else if (attr == &dev_attr_gt_RP1_freq_mhz) { + val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; + } else if (attr == &dev_attr_gt_RPn_freq_mhz) { + val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; + } else { + BUG(); + } + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static const struct attribute *gen6_attrs[] = { + &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_max_freq_mhz.attr, + &dev_attr_gt_min_freq_mhz.attr, + &dev_attr_gt_RP0_freq_mhz.attr, + &dev_attr_gt_RP1_freq_mhz.attr, + &dev_attr_gt_RPn_freq_mhz.attr, + NULL, +}; + +void i915_setup_sysfs(struct drm_device *dev) +{ + int ret; + +#ifdef CONFIG_PM + if (INTEL_INFO(dev)->gen >= 6) { + ret = sysfs_merge_group(&dev->primary->kdev.kobj, + &rc6_attr_group); + if (ret) + DRM_ERROR("RC6 residency sysfs setup failed\n"); + } +#endif + if (HAS_L3_GPU_CACHE(dev)) { + ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); + if (ret) + DRM_ERROR("l3 parity sysfs setup failed\n"); + } + + if (INTEL_INFO(dev)->gen >= 6) { + ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); + if (ret) + DRM_ERROR("gen6 sysfs setup failed\n"); + } +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); + device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); +#ifdef CONFIG_PM + sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); +#endif +} diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h new file mode 100644 index 0000000..3db4a68 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -0,0 +1,455 @@ +#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _I915_TRACE_H_ + +#include +#include +#include + +#include +#include "i915_drv.h" +#include "intel_ringbuffer.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 +#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) +#define TRACE_INCLUDE_FILE i915_trace + +/* object tracking */ + 
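+/*
+ * Each TRACE_EVENT() below expands to a tracepoint the driver fires by
+ * calling trace_<name>(); i915_gem_object_create, for instance, is emitted
+ * from the GEM code roughly as:
+ *
+ *	trace_i915_gem_object_create(obj);
+ *
+ * The events can then be enabled and read back at runtime through the
+ * tracing filesystem (path assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/i915/i915_gem_object_create/enable
+ *	cat /sys/kernel/debug/tracing/trace_pipe
+ */
+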
+TRACE_EVENT(i915_gem_object_create, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, size) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->size = obj->base.size; + ), + + TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) +); + +TRACE_EVENT(i915_gem_object_bind, + TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), + TP_ARGS(obj, mappable), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, size) + __field(bool, mappable) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->offset = obj->gtt_space->start; + __entry->size = obj->gtt_space->size; + __entry->mappable = mappable; + ), + + TP_printk("obj=%p, offset=%08x size=%x%s", + __entry->obj, __entry->offset, __entry->size, + __entry->mappable ? ", mappable" : "") +); + +TRACE_EVENT(i915_gem_object_unbind, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, size) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->offset = obj->gtt_space->start; + __entry->size = obj->gtt_space->size; + ), + + TP_printk("obj=%p, offset=%08x size=%x", + __entry->obj, __entry->offset, __entry->size) +); + +TRACE_EVENT(i915_gem_object_change_domain, + TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), + TP_ARGS(obj, old_read, old_write), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, read_domains) + __field(u32, write_domain) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->read_domains = obj->base.read_domains | (old_read << 16); + __entry->write_domain = obj->base.write_domain | (old_write << 16); + ), + + TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x", + __entry->obj, + __entry->read_domains >> 16, + __entry->read_domains & 0xffff, + __entry->write_domain >> 16, + __entry->write_domain & 0xffff) +); + +TRACE_EVENT(i915_gem_object_pwrite, + TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), + TP_ARGS(obj, offset, len), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, len) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->offset = offset; + __entry->len = len; + ), + + TP_printk("obj=%p, offset=%u, len=%u", + __entry->obj, __entry->offset, __entry->len) +); + +TRACE_EVENT(i915_gem_object_pread, + TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), + TP_ARGS(obj, offset, len), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, len) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->offset = offset; + __entry->len = len; + ), + + TP_printk("obj=%p, offset=%u, len=%u", + __entry->obj, __entry->offset, __entry->len) +); + +TRACE_EVENT(i915_gem_object_fault, + TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), + TP_ARGS(obj, index, gtt, write), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, index) + __field(bool, gtt) + __field(bool, write) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->index = index; + __entry->gtt = gtt; + __entry->write = write; + ), + + TP_printk("obj=%p, %s index=%u %s", + __entry->obj, + __entry->gtt ? "GTT" : "CPU", + __entry->index, + __entry->write ? 
", writable" : "") +); + +DECLARE_EVENT_CLASS(i915_gem_object, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + ), + + TP_fast_assign( + __entry->obj = obj; + ), + + TP_printk("obj=%p", __entry->obj) +); + +DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj) +); + +DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj) +); + +TRACE_EVENT(i915_gem_evict, + TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), + TP_ARGS(dev, size, align, mappable), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, size) + __field(u32, align) + __field(bool, mappable) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->size = size; + __entry->align = align; + __entry->mappable = mappable; + ), + + TP_printk("dev=%d, size=%d, align=%d %s", + __entry->dev, __entry->size, __entry->align, + __entry->mappable ? ", mappable" : "") +); + +TRACE_EVENT(i915_gem_evict_everything, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%d", __entry->dev) +); + +TRACE_EVENT(i915_gem_ring_dispatch, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), + TP_ARGS(ring, seqno, flags), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + __field(u32, seqno) + __field(u32, flags) + ), + + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->seqno = seqno; + __entry->flags = flags; + i915_trace_irq_get(ring, seqno); + ), + + TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", + __entry->dev, __entry->ring, __entry->seqno, __entry->flags) +); + +TRACE_EVENT(i915_gem_ring_flush, + TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), + TP_ARGS(ring, invalidate, flush), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + __field(u32, invalidate) + __field(u32, flush) + ), + + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->invalidate = invalidate; + __entry->flush = flush; + ), + + TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x", + __entry->dev, __entry->ring, + __entry->invalidate, __entry->flush) +); + +DECLARE_EVENT_CLASS(i915_gem_request, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, ring=%u, seqno=%u", + __entry->dev, __entry->ring, __entry->seqno) +); + +DEFINE_EVENT(i915_gem_request, i915_gem_request_add, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) +); + +DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) +); + +DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) +); + +TRACE_EVENT(i915_gem_request_wait_begin, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + __field(u32, seqno) + __field(bool, blocking) + ), + + /* NB: the blocking information is racy 
since mutex_is_locked + * doesn't check that the current thread holds the lock. The only + * other option would be to pass the boolean information of whether + * or not the class was blocking down through the stack which is + * less desirable. + */ + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->seqno = seqno; + __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex); + ), + + TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", + __entry->dev, __entry->ring, __entry->seqno, + __entry->blocking ? "yes (NB)" : "no") +); + +DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) +); + +DECLARE_EVENT_CLASS(i915_ring, + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + ), + + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + ), + + TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring) +); + +DEFINE_EVENT(i915_ring, i915_ring_wait_begin, + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring) +); + +DEFINE_EVENT(i915_ring, i915_ring_wait_end, + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring) +); + +TRACE_EVENT(i915_flip_request, + TP_PROTO(int plane, struct drm_i915_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_i915_gem_object *, obj) + ), + + TP_fast_assign( + __entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + +TRACE_EVENT(i915_flip_complete, + TP_PROTO(int plane, struct drm_i915_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_i915_gem_object *, obj) + ), + + TP_fast_assign( + __entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + +TRACE_EVENT(i915_reg_rw, + TP_PROTO(bool write, u32 reg, u64 val, int len), + + TP_ARGS(write, reg, val, len), + + TP_STRUCT__entry( + __field(u64, val) + __field(u32, reg) + __field(u16, write) + __field(u16, len) + ), + + TP_fast_assign( + __entry->val = (u64)val; + __entry->reg = reg; + __entry->write = write; + __entry->len = len; + ), + + TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", + __entry->write ? "write" : "read", + __entry->reg, __entry->len, + (u32)(__entry->val & 0xffffffff), + (u32)(__entry->val >> 32)) +); + +TRACE_EVENT(intel_gpu_freq_change, + TP_PROTO(u32 freq), + TP_ARGS(freq), + + TP_STRUCT__entry( + __field(u32, freq) + ), + + TP_fast_assign( + __entry->freq = freq; + ), + + TP_printk("new_freq=%u", __entry->freq) +); + +#endif /* _I915_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
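+/*
+ * The tracepoint bodies themselves are only emitted in the single translation
+ * unit that defines CREATE_TRACE_POINTS before pulling this header in (see
+ * i915_trace_points.c), along the lines of:
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include "i915_trace.h"
+ *
+ * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE above tell that machinery where to
+ * find this header when it is re-read.
+ */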
+#include diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c new file mode 100644 index 0000000..f1df2bd --- /dev/null +++ b/drivers/gpu/drm/i915/i915_trace_points.c @@ -0,0 +1,13 @@ +/* + * Copyright © 2009 Intel Corporation + * + * Authors: + * Chris Wilson + */ + +#include "i915_drv.h" + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "i915_trace.h" +#endif diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c new file mode 100644 index 0000000..985a097 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ums.c @@ -0,0 +1,503 @@ +/* + * + * Copyright 2008 (c) Intel Corporation + * Jesse Barnes + * Copyright 2013 (c) Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "intel_drv.h" +#include "i915_reg.h" + +static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpll_reg; + + /* On IVB, 3rd pipe shares PLL with another one */ + if (pipe > 1) + return false; + + if (HAS_PCH_SPLIT(dev)) + dpll_reg = _PCH_DPLL(pipe); + else + dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; + + return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); +} + +static void i915_save_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (HAS_PCH_SPLIT(dev)) + reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; + + if (pipe == PIPE_A) + array = dev_priv->regfile.save_palette_a; + else + array = dev_priv->regfile.save_palette_b; + + for (i = 0; i < 256; i++) + array[i] = I915_READ(reg + (i << 2)); +} + +static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (HAS_PCH_SPLIT(dev)) + reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; + + if (pipe == PIPE_A) + array = dev_priv->regfile.save_palette_a; + else + array = dev_priv->regfile.save_palette_b; + + for (i = 0; i < 256; i++) + I915_WRITE(reg + (i << 2), array[i]); +} + +void i915_save_display_reg(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Cursor state */ + dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); + dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); + dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); + dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); + dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); + dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); + if (IS_GEN2(dev)) + dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); + + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); + dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); + } + + /* Pipe & plane A info */ + dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); + dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); + dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); + dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); + } else { + dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); + dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); + dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); + } + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) + dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); + dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); + dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); + dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); + dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); + dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); + dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); + if (!HAS_PCH_SPLIT(dev)) + dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); + + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); + dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); + dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); + dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); + + dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); + dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); + + dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); + dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); + dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); + + dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); + dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); + dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); + dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); + dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); + dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); + dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); + } + + dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); + dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); + dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); + dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); + dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); + if (INTEL_INFO(dev)->gen >= 4) { + dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); + dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); + } + i915_save_palette(dev, 
PIPE_A); + dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); + + /* Pipe & plane B info */ + dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); + dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); + dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); + dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); + } else { + dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); + dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); + dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); + } + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) + dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); + dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); + dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); + dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); + dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); + dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); + dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); + if (!HAS_PCH_SPLIT(dev)) + dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); + + if (HAS_PCH_SPLIT(dev)) { + dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); + dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); + dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); + dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); + + dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); + dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); + + dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); + dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); + dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); + + dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); + dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); + dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); + dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); + dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); + dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); + dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); + } + + dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); + dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); + dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); + dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); + dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); + if (INTEL_INFO(dev)->gen >= 4) { + dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); + dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); + } + i915_save_palette(dev, PIPE_B); + dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < 16; i++) + dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + } + + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) + dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); + else + dev_priv->regfile.saveADPA = I915_READ(ADPA); + + /* Display Port state */ + if 
(SUPPORTS_INTEGRATED_DP(dev)) { + dev_priv->regfile.saveDP_B = I915_READ(DP_B); + dev_priv->regfile.saveDP_C = I915_READ(DP_C); + dev_priv->regfile.saveDP_D = I915_READ(DP_D); + dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); + dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); + dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); + dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); + dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); + dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); + dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); + dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); + } + /* FIXME: regfile.save TV & SDVO state */ + + return; +} + +void i915_restore_display_reg(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int dpll_a_reg, fpa0_reg, fpa1_reg; + int dpll_b_reg, fpb0_reg, fpb1_reg; + int i; + + /* Display port ratios (must be done before clock is set) */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); + I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); + I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); + I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); + I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); + I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); + I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); + I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); + } + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + break; + case 3: + case 2: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); + break; + } + + + if (HAS_PCH_SPLIT(dev)) { + dpll_a_reg = _PCH_DPLL_A; + dpll_b_reg = _PCH_DPLL_B; + fpa0_reg = _PCH_FPA0; + fpb0_reg = _PCH_FPB0; + fpa1_reg = _PCH_FPA1; + fpb1_reg = _PCH_FPB1; + } else { + dpll_a_reg = _DPLL_A; + dpll_b_reg = _DPLL_B; + fpa0_reg = _FPA0; + fpb0_reg = _FPB0; + fpa1_reg = _FPA1; + fpb1_reg = _FPB1; + } + + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); + I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); + } + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & + ~DPLL_VCO_ENABLE); + POSTING_READ(dpll_a_reg); + udelay(150); + } + I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); + I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); + /* Actually enable it */ + I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); + POSTING_READ(dpll_a_reg); + udelay(150); + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { + I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); + POSTING_READ(_DPLL_A_MD); + } + udelay(150); + + /* Restore mode */ + I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); + I915_WRITE(_HBLANK_A, 
dev_priv->regfile.saveHBLANK_A); + I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); + I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); + I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); + I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); + + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); + I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); + I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); + I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); + + I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); + I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); + + I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); + I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); + I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); + + I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); + I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); + I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); + I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); + I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); + I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); + I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); + } + + /* Restore plane info */ + I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); + I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); + I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); + I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); + I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); + if (INTEL_INFO(dev)->gen >= 4) { + I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); + I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); + } + + I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); + + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); + I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); + + /* Pipe & plane B info */ + if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & + ~DPLL_VCO_ENABLE); + POSTING_READ(dpll_b_reg); + udelay(150); + } + I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); + I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); + /* Actually enable it */ + I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); + POSTING_READ(dpll_b_reg); + udelay(150); + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { + I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); + POSTING_READ(_DPLL_B_MD); + } + udelay(150); + + /* Restore mode */ + I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); + I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); + I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); + I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); + I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); + I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); + + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); + I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); + I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); + I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); + + I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); + 
I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); + + I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); + I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); + I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); + + I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); + I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); + I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); + I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); + I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); + I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); + I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); + } + + /* Restore plane info */ + I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); + I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); + I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); + I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); + I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); + if (INTEL_INFO(dev)->gen >= 4) { + I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); + I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); + } + + I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); + + i915_restore_palette(dev, PIPE_B); + /* Enable the plane */ + I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); + I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); + + /* Cursor state */ + I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); + I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); + I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); + I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); + I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); + I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); + if (IS_GEN2(dev)) + I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); + + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); + else + I915_WRITE(ADPA, dev_priv->regfile.saveADPA); + + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); + I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); + I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); + } + /* FIXME: restore TV & SDVO state */ + + return; +} diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 0000000..bcbbaea --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -0,0 +1,251 @@ +/* + * Intel ACPI functions + * + * _DSM related code stolen from nouveau_acpi.c. + */ +#include +#include +#include +#include + +#include +#include "i915_drv.h" + +#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... 
*/ + +#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ +#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ + +static struct intel_dsm_priv { + acpi_handle dhandle; +} intel_dsm_priv; + +static const u8 intel_dsm_guid[] = { + 0xd3, 0x73, 0xd8, 0x7e, + 0xd0, 0xc2, + 0x4f, 0x4e, + 0xa8, 0x54, + 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c +}; + +static int intel_dsm(acpi_handle handle, int func, int arg) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + u32 result; + int ret = 0; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + ret = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + return ret; + } + + obj = (union acpi_object *)output.pointer; + + result = 0; + switch (obj->type) { + case ACPI_TYPE_INTEGER: + result = obj->integer.value; + break; + + case ACPI_TYPE_BUFFER: + if (obj->buffer.length == 4) { + result = (obj->buffer.pointer[0] | + (obj->buffer.pointer[1] << 8) | + (obj->buffer.pointer[2] << 16) | + (obj->buffer.pointer[3] << 24)); + break; + } + default: + ret = -EINVAL; + break; + } + if (result == 0x80000002) + ret = -ENODEV; + + kfree(output.pointer); + return ret; +} + +static char *intel_dsm_port_name(u8 id) +{ + switch (id) { + case 0: + return "Reserved"; + case 1: + return "Analog VGA"; + case 2: + return "LVDS"; + case 3: + return "Reserved"; + case 4: + return "HDMI/DVI_B"; + case 5: + return "HDMI/DVI_C"; + case 6: + return "HDMI/DVI_D"; + case 7: + return "DisplayPort_A"; + case 8: + return "DisplayPort_B"; + case 9: + return "DisplayPort_C"; + case 0xa: + return "DisplayPort_D"; + case 0xb: + case 0xc: + case 0xd: + return "Reserved"; + case 0xe: + return "WiDi"; + default: + return "bad type"; + } +} + +static char *intel_dsm_mux_type(u8 type) +{ + switch (type) { + case 0: + return "unknown"; + case 1: + return "No MUX, iGPU only"; + case 2: + return "No MUX, dGPU only"; + case 3: + return "MUXed between iGPU and dGPU"; + default: + return "bad type"; + } +} + +static void intel_dsm_platform_mux_info(void) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *pkg; + int i, ret; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = 0; + + ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, + &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + goto out; + } + + pkg = (union acpi_object *)output.pointer; + + if (pkg->type == ACPI_TYPE_PACKAGE) { + union acpi_object *connector_count = &pkg->package.elements[0]; + DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", + (unsigned long 
long)connector_count->integer.value); + for (i = 1; i < pkg->package.count; i++) { + union acpi_object *obj = &pkg->package.elements[i]; + union acpi_object *connector_id = + &obj->package.elements[0]; + union acpi_object *info = &obj->package.elements[1]; + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", + (unsigned long long)connector_id->integer.value); + DRM_DEBUG_DRIVER(" port id: %s\n", + intel_dsm_port_name(info->buffer.pointer[0])); + DRM_DEBUG_DRIVER(" display mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[1])); + DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[2])); + DRM_DEBUG_DRIVER(" hpd mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[3])); + } + } + +out: + kfree(output.pointer); +} + +static bool intel_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, intel_handle; + acpi_status status; + int ret; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_DSM", &intel_handle); + if (ACPI_FAILURE(status)) { + DRM_DEBUG_KMS("no _DSM method for intel device\n"); + return false; + } + + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + if (ret < 0) { + DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); + return false; + } + + intel_dsm_priv.dhandle = dhandle; + + intel_dsm_platform_mux_info(); + return true; +} + +static bool intel_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + bool has_dsm = false; + int vga_count = 0; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + has_dsm |= intel_dsm_pci_probe(pdev); + } + + if (vga_count == 2 && has_dsm) { + acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + return true; + } + + return false; +} + +void intel_register_dsm_handler(void) +{ + if (!intel_dsm_detect()) + return; +} + +void intel_unregister_dsm_handler(void) +{ +} diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c new file mode 100644 index 0000000..95070b2 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -0,0 +1,771 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Authors: + * Eric Anholt + * + */ +#include +#include +#include +#include +#include "i915_drv.h" +#include "intel_bios.h" + +#define SLAVE_ADDR1 0x70 +#define SLAVE_ADDR2 0x72 + +static int panel_type; + +static void * +find_section(struct bdb_header *bdb, int section_id) +{ + u8 *base = (u8 *)bdb; + int index = 0; + u16 total, current_size; + u8 current_id; + + /* skip to first section */ + index += bdb->header_size; + total = bdb->bdb_size; + + /* walk the sections looking for section_id */ + while (index < total) { + current_id = *(base + index); + index++; + current_size = *((u16 *)(base + index)); + index += 2; + if (current_id == section_id) + return base + index; + index += current_size; + } + + return NULL; +} + +static u16 +get_blocksize(void *p) +{ + u16 *block_ptr, block_size; + + block_ptr = (u16 *)((char *)p - 2); + block_size = *block_ptr; + return block_size; +} + +static void +fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, + const struct lvds_dvo_timing *dvo_timing) +{ + panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | + dvo_timing->hactive_lo; + panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); + panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + + dvo_timing->hsync_pulse_width; + panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); + + panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | + dvo_timing->vactive_lo; + panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + + dvo_timing->vsync_off; + panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + + dvo_timing->vsync_pulse_width; + panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); + panel_fixed_mode->clock = dvo_timing->clock * 10; + panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; + + if (dvo_timing->hsync_positive) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; + + if (dvo_timing->vsync_positive) + panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; + else + panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + + /* Some VBTs have bogus h/vtotal values */ + if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; + if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) + panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; + + drm_mode_set_name(panel_fixed_mode); +} + +static bool +lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, + const struct lvds_dvo_timing *b) +{ + if (a->hactive_hi != b->hactive_hi || + a->hactive_lo != b->hactive_lo) + return false; + + if (a->hsync_off_hi != b->hsync_off_hi || + a->hsync_off_lo != b->hsync_off_lo) + return false; + + if (a->hsync_pulse_width != b->hsync_pulse_width) + return false; + + if (a->hblank_hi != b->hblank_hi || + a->hblank_lo != b->hblank_lo) + return false; + + if (a->vactive_hi != b->vactive_hi || + a->vactive_lo != b->vactive_lo) + return false; + + if (a->vsync_off != b->vsync_off) + return false; + + if (a->vsync_pulse_width != b->vsync_pulse_width) + return false; + + if (a->vblank_hi != b->vblank_hi || + a->vblank_lo != b->vblank_lo) + return false; + + return true; +} + +static const struct lvds_dvo_timing * +get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, + int index) +{ + 
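+	/*
+	 * Worked example of the offset arithmetic below (values purely
+	 * illustrative, not taken from a real VBT): with
+	 * ptr[0].fp_timing_offset = 0x100, ptr[0].dvo_timing_offset = 0x130
+	 * and ptr[1].dvo_timing_offset = 0x170, each LFP data entry is
+	 * 0x170 - 0x130 = 0x40 bytes long and the DVO timing starts
+	 * 0x130 - 0x100 = 0x30 bytes into the entry, so entry N's DVO timing
+	 * lives at data + N * 0x40 + 0x30.
+	 */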
/* + * the size of fp_timing varies on the different platform. + * So calculate the DVO timing relative offset in LVDS data + * entry to get the DVO timing entry + */ + + int lfp_data_size = + lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; + int dvo_timing_offset = + lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; + char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index; + + return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); +} + +/* get lvds_fp_timing entry + * this function may return NULL if the corresponding entry is invalid + */ +static const struct lvds_fp_timing * +get_lvds_fp_timing(const struct bdb_header *bdb, + const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs, + int index) +{ + size_t data_ofs = (const u8 *)data - (const u8 *)bdb; + u16 data_size = ((const u16 *)data)[-1]; /* stored in header */ + size_t ofs; + + if (index >= ARRAY_SIZE(ptrs->ptr)) + return NULL; + ofs = ptrs->ptr[index].fp_timing_offset; + if (ofs < data_ofs || + ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size) + return NULL; + return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); +} + +/* Try to find integrated panel data */ +static void +parse_lfp_panel_data(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + const struct bdb_lvds_options *lvds_options; + const struct bdb_lvds_lfp_data *lvds_lfp_data; + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; + const struct lvds_dvo_timing *panel_dvo_timing; + const struct lvds_fp_timing *fp_timing; + struct drm_display_mode *panel_fixed_mode; + int i, downclock; + + lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); + if (!lvds_options) + return; + + dev_priv->lvds_dither = lvds_options->pixel_dither; + if (lvds_options->panel_type == 0xff) + return; + + panel_type = lvds_options->panel_type; + + lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); + if (!lvds_lfp_data) + return; + + lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); + if (!lvds_lfp_data_ptrs) + return; + + dev_priv->lvds_vbt = 1; + + panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, + lvds_lfp_data_ptrs, + lvds_options->panel_type); + + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); + if (!panel_fixed_mode) + return; + + fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); + + dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; + + DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); + drm_mode_debug_printmodeline(panel_fixed_mode); + + /* + * Iterate over the LVDS panel timing info to find the lowest clock + * for the native resolution. + */ + downclock = panel_dvo_timing->clock; + for (i = 0; i < 16; i++) { + const struct lvds_dvo_timing *dvo_timing; + + dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, + lvds_lfp_data_ptrs, + i); + if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && + dvo_timing->clock < downclock) + downclock = dvo_timing->clock; + } + + if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { + dev_priv->lvds_downclock_avail = 1; + dev_priv->lvds_downclock = downclock * 10; + DRM_DEBUG_KMS("LVDS downclock is found in VBT. 
" + "Normal Clock %dKHz, downclock %dKHz\n", + panel_fixed_mode->clock, 10*downclock); + } + + fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, + lvds_lfp_data_ptrs, + lvds_options->panel_type); + if (fp_timing) { + /* check the resolution, just to be sure */ + if (fp_timing->x_res == panel_fixed_mode->hdisplay && + fp_timing->y_res == panel_fixed_mode->vdisplay) { + dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; + DRM_DEBUG_KMS("VBT initial LVDS value %x\n", + dev_priv->bios_lvds_val); + } + } +} + +/* Try to find sdvo panel data */ +static void +parse_sdvo_panel_data(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct lvds_dvo_timing *dvo_timing; + struct drm_display_mode *panel_fixed_mode; + int index; + + index = i915_vbt_sdvo_panel_type; + if (index == -2) { + DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); + return; + } + + if (index == -1) { + struct bdb_sdvo_lvds_options *sdvo_lvds_options; + + sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); + if (!sdvo_lvds_options) + return; + + index = sdvo_lvds_options->panel_type; + } + + dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); + if (!dvo_timing) + return; + + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); + if (!panel_fixed_mode) + return; + + fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); + + dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; + + DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); + drm_mode_debug_printmodeline(panel_fixed_mode); +} + +static int intel_bios_ssc_frequency(struct drm_device *dev, + bool alternate) +{ + switch (INTEL_INFO(dev)->gen) { + case 2: + return alternate ? 66 : 48; + case 3: + case 4: + return alternate ? 100 : 96; + default: + return alternate ? 100 : 120; + } +} + +static void +parse_general_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct drm_device *dev = dev_priv->dev; + struct bdb_general_features *general; + + general = find_section(bdb, BDB_GENERAL_FEATURES); + if (general) { + dev_priv->int_tv_support = general->int_tv_support; + dev_priv->int_crt_support = general->int_crt_support; + dev_priv->lvds_use_ssc = general->enable_ssc; + dev_priv->lvds_ssc_freq = + intel_bios_ssc_frequency(dev, general->ssc_freq); + dev_priv->display_clock_mode = general->display_clock_mode; + dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", + dev_priv->int_tv_support, + dev_priv->int_crt_support, + dev_priv->lvds_use_ssc, + dev_priv->lvds_ssc_freq, + dev_priv->display_clock_mode, + dev_priv->fdi_rx_polarity_inverted); + } +} + +static void +parse_general_definitions(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct bdb_general_definitions *general; + + general = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (general) { + u16 block_size = get_blocksize(general); + if (block_size >= sizeof(*general)) { + int bus_pin = general->crt_ddc_gmbus_pin; + DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); + if (intel_gmbus_is_port_valid(bus_pin)) + dev_priv->crt_ddc_pin = bus_pin; + } else { + DRM_DEBUG_KMS("BDB_GD too small (%d). 
Invalid.\n", + block_size); + } + } +} + +static void +parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct sdvo_device_mapping *p_mapping; + struct bdb_general_definitions *p_defs; + struct child_device_config *p_child; + int i, child_device_num, count; + u16 block_size; + + p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!p_defs) { + DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); + return; + } + /* judge whether the size of child device meets the requirements. + * If the child device size obtained from general definition block + * is different with sizeof(struct child_device_config), skip the + * parsing of sdvo device info + */ + if (p_defs->child_dev_size != sizeof(*p_child)) { + /* different child dev size . Ignore it */ + DRM_DEBUG_KMS("different child size is found. Invalid.\n"); + return; + } + /* get the block size of general definitions */ + block_size = get_blocksize(p_defs); + /* get the number of child device */ + child_device_num = (block_size - sizeof(*p_defs)) / + sizeof(*p_child); + count = 0; + for (i = 0; i < child_device_num; i++) { + p_child = &(p_defs->devices[i]); + if (!p_child->device_type) { + /* skip the device block if device type is invalid */ + continue; + } + if (p_child->slave_addr != SLAVE_ADDR1 && + p_child->slave_addr != SLAVE_ADDR2) { + /* + * If the slave address is neither 0x70 nor 0x72, + * it is not a SDVO device. Skip it. + */ + continue; + } + if (p_child->dvo_port != DEVICE_PORT_DVOB && + p_child->dvo_port != DEVICE_PORT_DVOC) { + /* skip the incorrect SDVO port */ + DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); + continue; + } + DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" + " %s port\n", + p_child->slave_addr, + (p_child->dvo_port == DEVICE_PORT_DVOB) ? + "SDVOB" : "SDVOC"); + p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); + if (!p_mapping->initialized) { + p_mapping->dvo_port = p_child->dvo_port; + p_mapping->slave_addr = p_child->slave_addr; + p_mapping->dvo_wiring = p_child->dvo_wiring; + p_mapping->ddc_pin = p_child->ddc_pin; + p_mapping->i2c_pin = p_child->i2c_pin; + p_mapping->initialized = 1; + DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", + p_mapping->dvo_port, + p_mapping->slave_addr, + p_mapping->dvo_wiring, + p_mapping->ddc_pin, + p_mapping->i2c_pin); + } else { + DRM_DEBUG_KMS("Maybe one SDVO port is shared by " + "two SDVO device.\n"); + } + if (p_child->slave2_addr) { + /* Maybe this is a SDVO device with multiple inputs */ + /* And the mapping info is not added */ + DRM_DEBUG_KMS("there exists the slave2_addr. 
Maybe this" + " is a SDVO device with multiple inputs.\n"); + } + count++; + } + + if (!count) { + /* No SDVO device info is found */ + DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); + } + return; +} + +static void +parse_driver_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct drm_device *dev = dev_priv->dev; + struct bdb_driver_features *driver; + + driver = find_section(bdb, BDB_DRIVER_FEATURES); + if (!driver) + return; + + if (SUPPORTS_EDP(dev) && + driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp.support = 1; + + if (driver->dual_frequency) + dev_priv->render_reclock_avail = true; +} + +static void +parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) +{ + struct bdb_edp *edp; + struct edp_power_seq *edp_pps; + struct edp_link_params *edp_link_params; + + edp = find_section(bdb, BDB_EDP); + if (!edp) { + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); + return; + } + + switch ((edp->color_depth >> (panel_type * 2)) & 3) { + case EDP_18BPP: + dev_priv->edp.bpp = 18; + break; + case EDP_24BPP: + dev_priv->edp.bpp = 24; + break; + case EDP_30BPP: + dev_priv->edp.bpp = 30; + break; + } + + /* Get the eDP sequencing and link info */ + edp_pps = &edp->power_seqs[panel_type]; + edp_link_params = &edp->link_params[panel_type]; + + dev_priv->edp.pps = *edp_pps; + + dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : + DP_LINK_BW_1_62; + switch (edp_link_params->lanes) { + case 0: + dev_priv->edp.lanes = 1; + break; + case 1: + dev_priv->edp.lanes = 2; + break; + case 3: + default: + dev_priv->edp.lanes = 4; + break; + } + switch (edp_link_params->preemphasis) { + case 0: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; + break; + case 1: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; + break; + case 2: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; + break; + case 3: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; + break; + } + switch (edp_link_params->vswing) { + case 0: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; + break; + case 1: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; + break; + case 2: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; + break; + case 3: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; + break; + } +} + +static void +parse_device_mapping(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct bdb_general_definitions *p_defs; + struct child_device_config *p_child, *child_dev_ptr; + int i, child_device_num, count; + u16 block_size; + + p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!p_defs) { + DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); + return; + } + /* judge whether the size of child device meets the requirements. + * If the child device size obtained from general definition block + * is different with sizeof(struct child_device_config), skip the + * parsing of sdvo device info + */ + if (p_defs->child_dev_size != sizeof(*p_child)) { + /* different child dev size . Ignore it */ + DRM_DEBUG_KMS("different child size is found. 
Invalid.\n"); + return; + } + /* get the block size of general definitions */ + block_size = get_blocksize(p_defs); + /* get the number of child device */ + child_device_num = (block_size - sizeof(*p_defs)) / + sizeof(*p_child); + count = 0; + /* get the number of child device that is present */ + for (i = 0; i < child_device_num; i++) { + p_child = &(p_defs->devices[i]); + if (!p_child->device_type) { + /* skip the device block if device type is invalid */ + continue; + } + count++; + } + if (!count) { + DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); + return; + } + dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); + if (!dev_priv->child_dev) { + DRM_DEBUG_KMS("No memory space for child device\n"); + return; + } + + dev_priv->child_dev_num = count; + count = 0; + for (i = 0; i < child_device_num; i++) { + p_child = &(p_defs->devices[i]); + if (!p_child->device_type) { + /* skip the device block if device type is invalid */ + continue; + } + child_dev_ptr = dev_priv->child_dev + count; + count++; + memcpy((void *)child_dev_ptr, (void *)p_child, + sizeof(*p_child)); + } + return; +} + +static void +init_vbt_defaults(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; + + /* LFP panel data */ + dev_priv->lvds_dither = 1; + dev_priv->lvds_vbt = 0; + + /* SDVO panel data */ + dev_priv->sdvo_lvds_vbt_mode = NULL; + + /* general features */ + dev_priv->int_tv_support = 1; + dev_priv->int_crt_support = 1; + + /* Default to using SSC */ + dev_priv->lvds_use_ssc = 1; + dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); + DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); +} + +static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Falling back to manually reading VBT from " + "VBIOS ROM for %s\n", + id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_opregion_vbt[] = { + { + .callback = intel_no_opregion_vbt_callback, + .ident = "ThinkCentre A57", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), + }, + }, + { } +}; + +/** + * intel_parse_bios - find VBT and initialize settings from the BIOS + * @dev: DRM device + * + * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers + * to appropriate values. + * + * Returns 0 on success, nonzero on failure. + */ +int +intel_parse_bios(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev->pdev; + struct bdb_header *bdb = NULL; + u8 __iomem *bios = NULL; + + if (HAS_PCH_NOP(dev)) + return -ENODEV; + + init_vbt_defaults(dev_priv); + + /* XXX Should this validation be moved to intel_opregion.c? 
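Both parse_sdvo_device_mapping() and parse_device_mapping() above size the child-device array with the same arithmetic: whatever remains of the block payload after the fixed part of bdb_general_definitions is treated as an array of child_device_config entries. A worked example with a made-up block size, taking the packed declarations in intel_bios.h at face value (5 bytes of fixed fields, 33 bytes per child entry):

#include <stdio.h>

int main(void)
{
        /* Hypothetical payload size reported by get_blocksize() for the
         * general definitions block; the 5- and 33-byte figures are simply
         * the packed struct sizes declared in intel_bios.h. */
        unsigned int block_size = 203;
        unsigned int defs_fixed = 5;    /* fixed part of bdb_general_definitions */
        unsigned int child_size = 33;   /* sizeof(struct child_device_config) */

        printf("child device slots = %u\n",
               (block_size - defs_fixed) / child_size);  /* prints 6 */
        return 0;
}

A block size of 203 therefore yields the six slots mentioned in the bdb_general_definitions comment in intel_bios.h.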
*/ + if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { + struct vbt_header *vbt = dev_priv->opregion.vbt; + if (memcmp(vbt->signature, "$VBT", 4) == 0) { + DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", + vbt->signature); + bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); + } else + dev_priv->opregion.vbt = NULL; + } + + if (bdb == NULL) { + struct vbt_header *vbt = NULL; + size_t size; + int i; + + bios = pci_map_rom(pdev, &size); + if (!bios) + return -1; + + /* Scour memory looking for the VBT signature */ + for (i = 0; i + 4 < size; i++) { + if (!memcmp(bios + i, "$VBT", 4)) { + vbt = (struct vbt_header *)(bios + i); + break; + } + } + + if (!vbt) { + DRM_DEBUG_DRIVER("VBT signature missing\n"); + pci_unmap_rom(pdev, bios); + return -1; + } + + bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); + } + + /* Grab useful general definitions */ + parse_general_features(dev_priv, bdb); + parse_general_definitions(dev_priv, bdb); + parse_lfp_panel_data(dev_priv, bdb); + parse_sdvo_panel_data(dev_priv, bdb); + parse_sdvo_device_mapping(dev_priv, bdb); + parse_device_mapping(dev_priv, bdb); + parse_driver_features(dev_priv, bdb); + parse_edp(dev_priv, bdb); + + if (bios) + pci_unmap_rom(pdev, bios); + + return 0; +} + +/* Ensure that vital registers have been initialised, even if the BIOS + * is absent or just failing to do its job. + */ +void intel_setup_bios(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Set the Panel Power On/Off timings if uninitialized. */ + if (!HAS_PCH_SPLIT(dev) && + I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { + /* Set T2 to 40ms and T5 to 200ms */ + I915_WRITE(PP_ON_DELAYS, 0x019007d0); + + /* Set T3 to 35ms and Tx to 200ms */ + I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); + } +} diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h new file mode 100644 index 0000000..e088d6f --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -0,0 +1,621 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
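For reference, the magic constants written by intel_setup_bios() in intel_bios.c above are easy to decode by hand: each register holds two 16-bit delays, and the values only line up with the T2/T5 and T3/Tx comments if the sequencer counts in 100 µs units (400 -> 40 ms, 2000 -> 200 ms, 350 -> 35 ms), so that unit is an assumption made here rather than something stated in the code:

#include <stdio.h>

int main(void)
{
        unsigned int pp_on  = 0x019007d0;   /* value written to PP_ON_DELAYS  */
        unsigned int pp_off = 0x015e07d0;   /* value written to PP_OFF_DELAYS */

        /* Upper and lower 16-bit halves, assumed to count in 100 us steps. */
        printf("PP_ON : %u ms / %u ms\n",
               (pp_on >> 16) / 10, (pp_on & 0xffff) / 10);   /* 40 ms / 200 ms */
        printf("PP_OFF: %u ms / %u ms\n",
               (pp_off >> 16) / 10, (pp_off & 0xffff) / 10); /* 35 ms / 200 ms */
        return 0;
}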
+ * + * Authors: + * Eric Anholt + * + */ + +#ifndef _I830_BIOS_H_ +#define _I830_BIOS_H_ + +#include + +struct vbt_header { + u8 signature[20]; /**< Always starts with 'VBT$' */ + u16 version; /**< decimal */ + u16 header_size; /**< in bytes */ + u16 vbt_size; /**< in bytes */ + u8 vbt_checksum; + u8 reserved0; + u32 bdb_offset; /**< from beginning of VBT */ + u32 aim_offset[4]; /**< from beginning of VBT */ +} __attribute__((packed)); + +struct bdb_header { + u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ + u16 version; /**< decimal */ + u16 header_size; /**< in bytes */ + u16 bdb_size; /**< in bytes */ +}; + +/* strictly speaking, this is a "skip" block, but it has interesting info */ +struct vbios_data { + u8 type; /* 0 == desktop, 1 == mobile */ + u8 relstage; + u8 chipset; + u8 lvds_present:1; + u8 tv_present:1; + u8 rsvd2:6; /* finish byte */ + u8 rsvd3[4]; + u8 signon[155]; + u8 copyright[61]; + u16 code_segment; + u8 dos_boot_mode; + u8 bandwidth_percent; + u8 rsvd4; /* popup memory size */ + u8 resize_pci_bios; + u8 rsvd5; /* is crt already on ddc2 */ +} __attribute__((packed)); + +/* + * There are several types of BIOS data blocks (BDBs), each block has + * an ID and size in the first 3 bytes (ID in first, size in next 2). + * Known types are listed below. + */ +#define BDB_GENERAL_FEATURES 1 +#define BDB_GENERAL_DEFINITIONS 2 +#define BDB_OLD_TOGGLE_LIST 3 +#define BDB_MODE_SUPPORT_LIST 4 +#define BDB_GENERIC_MODE_TABLE 5 +#define BDB_EXT_MMIO_REGS 6 +#define BDB_SWF_IO 7 +#define BDB_SWF_MMIO 8 +#define BDB_DOT_CLOCK_TABLE 9 +#define BDB_MODE_REMOVAL_TABLE 10 +#define BDB_CHILD_DEVICE_TABLE 11 +#define BDB_DRIVER_FEATURES 12 +#define BDB_DRIVER_PERSISTENCE 13 +#define BDB_EXT_TABLE_PTRS 14 +#define BDB_DOT_CLOCK_OVERRIDE 15 +#define BDB_DISPLAY_SELECT 16 +/* 17 rsvd */ +#define BDB_DRIVER_ROTATION 18 +#define BDB_DISPLAY_REMOVE 19 +#define BDB_OEM_CUSTOM 20 +#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ +#define BDB_SDVO_LVDS_OPTIONS 22 +#define BDB_SDVO_PANEL_DTDS 23 +#define BDB_SDVO_LVDS_PNP_IDS 24 +#define BDB_SDVO_LVDS_POWER_SEQ 25 +#define BDB_TV_OPTIONS 26 +#define BDB_EDP 27 +#define BDB_LVDS_OPTIONS 40 +#define BDB_LVDS_LFP_DATA_PTRS 41 +#define BDB_LVDS_LFP_DATA 42 +#define BDB_LVDS_BACKLIGHT 43 +#define BDB_LVDS_POWER 44 +#define BDB_SKIP 254 /* VBIOS private block, ignore */ + +struct bdb_general_features { + /* bits 1 */ + u8 panel_fitting:2; + u8 flexaim:1; + u8 msg_enable:1; + u8 clear_screen:3; + u8 color_flip:1; + + /* bits 2 */ + u8 download_ext_vbt:1; + u8 enable_ssc:1; + u8 ssc_freq:1; + u8 enable_lfp_on_override:1; + u8 disable_ssc_ddt:1; + u8 rsvd7:1; + u8 display_clock_mode:1; + u8 rsvd8:1; /* finish byte */ + + /* bits 3 */ + u8 disable_smooth_vision:1; + u8 single_dvi:1; + u8 rsvd9:1; + u8 fdi_rx_polarity_inverted:1; + u8 rsvd10:4; /* finish byte */ + + /* bits 4 */ + u8 legacy_monitor_detect; + + /* bits 5 */ + u8 int_crt_support:1; + u8 int_tv_support:1; + u8 int_efp_support:1; + u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ + u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ + u8 rsvd11:3; /* finish byte */ +} __attribute__((packed)); + +/* pre-915 */ +#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ +#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */ +#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */ +#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ + +/* Pre 915 */ +#define DEVICE_TYPE_NONE 0x00 +#define DEVICE_TYPE_CRT 0x01 +#define DEVICE_TYPE_TV 0x09 +#define 
DEVICE_TYPE_EFP 0x12 +#define DEVICE_TYPE_LFP 0x22 +/* On 915+ */ +#define DEVICE_TYPE_CRT_DPMS 0x6001 +#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001 +#define DEVICE_TYPE_TV_COMPOSITE 0x0209 +#define DEVICE_TYPE_TV_MACROVISION 0x0289 +#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c +#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609 +#define DEVICE_TYPE_TV_SCART 0x0209 +#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009 +#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012 +#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052 +#define DEVICE_TYPE_EFP_DVI_I 0x6053 +#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152 +#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2 +#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062 +#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162 +#define DEVICE_TYPE_LFP_PANELLINK 0x5012 +#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042 +#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062 +#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162 +#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2 + +#define DEVICE_CFG_NONE 0x00 +#define DEVICE_CFG_12BIT_DVOB 0x01 +#define DEVICE_CFG_12BIT_DVOC 0x02 +#define DEVICE_CFG_24BIT_DVOBC 0x09 +#define DEVICE_CFG_24BIT_DVOCB 0x0a +#define DEVICE_CFG_DUAL_DVOB 0x11 +#define DEVICE_CFG_DUAL_DVOC 0x12 +#define DEVICE_CFG_DUAL_DVOBC 0x13 +#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19 +#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a + +#define DEVICE_WIRE_NONE 0x00 +#define DEVICE_WIRE_DVOB 0x01 +#define DEVICE_WIRE_DVOC 0x02 +#define DEVICE_WIRE_DVOBC 0x03 +#define DEVICE_WIRE_DVOBB 0x05 +#define DEVICE_WIRE_DVOCC 0x06 +#define DEVICE_WIRE_DVOB_MASTER 0x0d +#define DEVICE_WIRE_DVOC_MASTER 0x0e + +#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */ +#define DEVICE_PORT_DVOB 0x01 +#define DEVICE_PORT_DVOC 0x02 + +struct child_device_config { + u16 handle; + u16 device_type; + u8 device_id[10]; /* ascii string */ + u16 addin_offset; + u8 dvo_port; /* See Device_PORT_* above */ + u8 i2c_pin; + u8 slave_addr; + u8 ddc_pin; + u16 edid_ptr; + u8 dvo_cfg; /* See DEVICE_CFG_* above */ + u8 dvo2_port; + u8 i2c2_pin; + u8 slave2_addr; + u8 ddc2_pin; + u8 capabilities; + u8 dvo_wiring;/* See DEVICE_WIRE_* above */ + u8 dvo2_wiring; + u16 extended_type; + u8 dvo_function; +} __attribute__((packed)); + +struct bdb_general_definitions { + /* DDC GPIO */ + u8 crt_ddc_gmbus_pin; + + /* DPMS bits */ + u8 dpms_acpi:1; + u8 skip_boot_crt_detect:1; + u8 dpms_aim:1; + u8 rsvd1:5; /* finish byte */ + + /* boot device bits */ + u8 boot_display[2]; + u8 child_dev_size; + + /* + * Device info: + * If TV is present, it'll be at devices[0]. + * LVDS will be next, either devices[0] or [1], if present. + * On some platforms the number of device is 6. But could be as few as + * 4 if both TV and LVDS are missing. + * And the device num is related with the size of general definition + * block. 
It is obtained by using the following formula: + * number = (block_size - sizeof(bdb_general_definitions))/ + * sizeof(child_device_config); + */ + struct child_device_config devices[0]; +} __attribute__((packed)); + +struct bdb_lvds_options { + u8 panel_type; + u8 rsvd1; + /* LVDS capabilities, stored in a dword */ + u8 pfit_mode:2; + u8 pfit_text_mode_enhanced:1; + u8 pfit_gfx_mode_enhanced:1; + u8 pfit_ratio_auto:1; + u8 pixel_dither:1; + u8 lvds_edid:1; + u8 rsvd2:1; + u8 rsvd4; +} __attribute__((packed)); + +/* LFP pointer table contains entries to the struct below */ +struct bdb_lvds_lfp_data_ptr { + u16 fp_timing_offset; /* offsets are from start of bdb */ + u8 fp_table_size; + u16 dvo_timing_offset; + u8 dvo_table_size; + u16 panel_pnp_id_offset; + u8 pnp_table_size; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data_ptrs { + u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + struct bdb_lvds_lfp_data_ptr ptr[16]; +} __attribute__((packed)); + +/* LFP data has 3 blocks per entry */ +struct lvds_fp_timing { + u16 x_res; + u16 y_res; + u32 lvds_reg; + u32 lvds_reg_val; + u32 pp_on_reg; + u32 pp_on_reg_val; + u32 pp_off_reg; + u32 pp_off_reg_val; + u32 pp_cycle_reg; + u32 pp_cycle_reg_val; + u32 pfit_reg; + u32 pfit_reg_val; + u16 terminator; +} __attribute__((packed)); + +struct lvds_dvo_timing { + u16 clock; /**< In 10khz */ + u8 hactive_lo; + u8 hblank_lo; + u8 hblank_hi:4; + u8 hactive_hi:4; + u8 vactive_lo; + u8 vblank_lo; + u8 vblank_hi:4; + u8 vactive_hi:4; + u8 hsync_off_lo; + u8 hsync_pulse_width; + u8 vsync_pulse_width:4; + u8 vsync_off:4; + u8 rsvd0:6; + u8 hsync_off_hi:2; + u8 h_image; + u8 v_image; + u8 max_hv; + u8 h_border; + u8 v_border; + u8 rsvd1:3; + u8 digital:2; + u8 vsync_positive:1; + u8 hsync_positive:1; + u8 rsvd2:1; +} __attribute__((packed)); + +struct lvds_pnp_id { + u16 mfg_name; + u16 product_code; + u32 serial; + u8 mfg_week; + u8 mfg_year; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data_entry { + struct lvds_fp_timing fp_timing; + struct lvds_dvo_timing dvo_timing; + struct lvds_pnp_id pnp_id; +} __attribute__((packed)); + +struct bdb_lvds_lfp_data { + struct bdb_lvds_lfp_data_entry data[16]; +} __attribute__((packed)); + +struct aimdb_header { + char signature[16]; + char oem_device[20]; + u16 aimdb_version; + u16 aimdb_header_size; + u16 aimdb_size; +} __attribute__((packed)); + +struct aimdb_block { + u8 aimdb_id; + u16 aimdb_size; +} __attribute__((packed)); + +struct vch_panel_data { + u16 fp_timing_offset; + u8 fp_timing_size; + u16 dvo_timing_offset; + u8 dvo_timing_size; + u16 text_fitting_offset; + u8 text_fitting_size; + u16 graphics_fitting_offset; + u8 graphics_fitting_size; +} __attribute__((packed)); + +struct vch_bdb_22 { + struct aimdb_block aimdb_block; + struct vch_panel_data panels[16]; +} __attribute__((packed)); + +struct bdb_sdvo_lvds_options { + u8 panel_backlight; + u8 h40_set_panel_type; + u8 panel_type; + u8 ssc_clk_freq; + u16 als_low_trip; + u16 als_high_trip; + u8 sclalarcoeff_tab_row_num; + u8 sclalarcoeff_tab_row_size; + u8 coefficient[8]; + u8 panel_misc_bits_1; + u8 panel_misc_bits_2; + u8 panel_misc_bits_3; + u8 panel_misc_bits_4; +} __attribute__((packed)); + + +#define BDB_DRIVER_FEATURE_NO_LVDS 0 +#define BDB_DRIVER_FEATURE_INT_LVDS 1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 +#define BDB_DRIVER_FEATURE_EDP 3 + +struct bdb_driver_features { + u8 boot_dev_algorithm:1; + u8 block_display_switch:1; + u8 allow_display_switch:1; + u8 hotplug_dvo:1; + u8 dual_view_zoom:1; + u8 int15h_hook:1; + 
u8 sprite_in_clone:1; + u8 primary_lfp_id:1; + + u16 boot_mode_x; + u16 boot_mode_y; + u8 boot_mode_bpp; + u8 boot_mode_refresh; + + u16 enable_lfp_primary:1; + u16 selective_mode_pruning:1; + u16 dual_frequency:1; + u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ + u16 nt_clone_support:1; + u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ + u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ + u16 cui_aspect_scaling:1; + u16 preserve_aspect_ratio:1; + u16 sdvo_device_power_down:1; + u16 crt_hotplug:1; + u16 lvds_config:2; + u16 tv_hotplug:1; + u16 hdmi_config:2; + + u8 static_display:1; + u8 reserved2:7; + u16 legacy_crt_max_x; + u16 legacy_crt_max_y; + u8 legacy_crt_max_refresh; + + u8 hdmi_termination; + u8 custom_vbt_version; +} __attribute__((packed)); + +#define EDP_18BPP 0 +#define EDP_24BPP 1 +#define EDP_30BPP 2 +#define EDP_RATE_1_62 0 +#define EDP_RATE_2_7 1 +#define EDP_LANE_1 0 +#define EDP_LANE_2 1 +#define EDP_LANE_4 3 +#define EDP_PREEMPHASIS_NONE 0 +#define EDP_PREEMPHASIS_3_5dB 1 +#define EDP_PREEMPHASIS_6dB 2 +#define EDP_PREEMPHASIS_9_5dB 3 +#define EDP_VSWING_0_4V 0 +#define EDP_VSWING_0_6V 1 +#define EDP_VSWING_0_8V 2 +#define EDP_VSWING_1_2V 3 + +struct edp_power_seq { + u16 t1_t3; + u16 t8; + u16 t9; + u16 t10; + u16 t11_t12; +} __attribute__ ((packed)); + +struct edp_link_params { + u8 rate:4; + u8 lanes:4; + u8 preemphasis:4; + u8 vswing:4; +} __attribute__ ((packed)); + +struct bdb_edp { + struct edp_power_seq power_seqs[16]; + u32 color_depth; + struct edp_link_params link_params[16]; + u32 sdrrs_msa_timing_delay; + + /* ith bit indicates enabled/disabled for (i+1)th panel */ + u16 edp_s3d_feature; + u16 edp_t3_optimization; +} __attribute__ ((packed)); + +void intel_setup_bios(struct drm_device *dev); +int intel_parse_bios(struct drm_device *dev); + +/* + * Driver<->VBIOS interaction occurs through scratch bits in + * GR18 & SWF*. 
+ */ + +/* GR18 bits are set on display switch and hotkey events */ +#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ +#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ +#define GR18_HK_NONE (0x0<<3) +#define GR18_HK_LFP_STRETCH (0x1<<3) +#define GR18_HK_TOGGLE_DISP (0x2<<3) +#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ +#define GR18_HK_POPUP_DISABLED (0x6<<3) +#define GR18_HK_POPUP_ENABLED (0x7<<3) +#define GR18_HK_PFIT (0x8<<3) +#define GR18_HK_APM_CHANGE (0xa<<3) +#define GR18_HK_MULTIPLE (0xc<<3) +#define GR18_USER_INT_EN (1<<2) +#define GR18_A0000_FLUSH_EN (1<<1) +#define GR18_SMM_EN (1<<0) + +/* Set by driver, cleared by VBIOS */ +#define SWF00_YRES_SHIFT 16 +#define SWF00_XRES_SHIFT 0 +#define SWF00_RES_MASK 0xffff + +/* Set by VBIOS at boot time and driver at runtime */ +#define SWF01_TV2_FORMAT_SHIFT 8 +#define SWF01_TV1_FORMAT_SHIFT 0 +#define SWF01_TV_FORMAT_MASK 0xffff + +#define SWF10_VBIOS_BLC_I2C_EN (1<<29) +#define SWF10_GTT_OVERRIDE_EN (1<<28) +#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ +#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) +#define SWF10_OLD_TOGGLE 0x0 +#define SWF10_TOGGLE_LIST_1 0x1 +#define SWF10_TOGGLE_LIST_2 0x2 +#define SWF10_TOGGLE_LIST_3 0x3 +#define SWF10_TOGGLE_LIST_4 0x4 +#define SWF10_PANNING_EN (1<<23) +#define SWF10_DRIVER_LOADED (1<<22) +#define SWF10_EXTENDED_DESKTOP (1<<21) +#define SWF10_EXCLUSIVE_MODE (1<<20) +#define SWF10_OVERLAY_EN (1<<19) +#define SWF10_PLANEB_HOLDOFF (1<<18) +#define SWF10_PLANEA_HOLDOFF (1<<17) +#define SWF10_VGA_HOLDOFF (1<<16) +#define SWF10_ACTIVE_DISP_MASK 0xffff +#define SWF10_PIPEB_LFP2 (1<<15) +#define SWF10_PIPEB_EFP2 (1<<14) +#define SWF10_PIPEB_TV2 (1<<13) +#define SWF10_PIPEB_CRT2 (1<<12) +#define SWF10_PIPEB_LFP (1<<11) +#define SWF10_PIPEB_EFP (1<<10) +#define SWF10_PIPEB_TV (1<<9) +#define SWF10_PIPEB_CRT (1<<8) +#define SWF10_PIPEA_LFP2 (1<<7) +#define SWF10_PIPEA_EFP2 (1<<6) +#define SWF10_PIPEA_TV2 (1<<5) +#define SWF10_PIPEA_CRT2 (1<<4) +#define SWF10_PIPEA_LFP (1<<3) +#define SWF10_PIPEA_EFP (1<<2) +#define SWF10_PIPEA_TV (1<<1) +#define SWF10_PIPEA_CRT (1<<0) + +#define SWF11_MEMORY_SIZE_SHIFT 16 +#define SWF11_SV_TEST_EN (1<<15) +#define SWF11_IS_AGP (1<<14) +#define SWF11_DISPLAY_HOLDOFF (1<<13) +#define SWF11_DPMS_REDUCED (1<<12) +#define SWF11_IS_VBE_MODE (1<<11) +#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ +#define SWF11_DPMS_MASK 0x07 +#define SWF11_DPMS_OFF (1<<2) +#define SWF11_DPMS_SUSPEND (1<<1) +#define SWF11_DPMS_STANDBY (1<<0) +#define SWF11_DPMS_ON 0 + +#define SWF14_GFX_PFIT_EN (1<<31) +#define SWF14_TEXT_PFIT_EN (1<<30) +#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ +#define SWF14_POPUP_EN (1<<28) +#define SWF14_DISPLAY_HOLDOFF (1<<27) +#define SWF14_DISP_DETECT_EN (1<<26) +#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ +#define SWF14_DRIVER_STATUS (1<<24) +#define SWF14_OS_TYPE_WIN9X (1<<23) +#define SWF14_OS_TYPE_WINNT (1<<22) +/* 21:19 rsvd */ +#define SWF14_PM_TYPE_MASK 0x00070000 +#define SWF14_PM_ACPI_VIDEO (0x4 << 16) +#define SWF14_PM_ACPI (0x3 << 16) +#define SWF14_PM_APM_12 (0x2 << 16) +#define SWF14_PM_APM_11 (0x1 << 16) +#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ + /* if GR18 indicates a display switch */ +#define SWF14_DS_PIPEB_LFP2_EN (1<<15) +#define SWF14_DS_PIPEB_EFP2_EN (1<<14) +#define SWF14_DS_PIPEB_TV2_EN (1<<13) +#define SWF14_DS_PIPEB_CRT2_EN (1<<12) +#define 
SWF14_DS_PIPEB_LFP_EN (1<<11) +#define SWF14_DS_PIPEB_EFP_EN (1<<10) +#define SWF14_DS_PIPEB_TV_EN (1<<9) +#define SWF14_DS_PIPEB_CRT_EN (1<<8) +#define SWF14_DS_PIPEA_LFP2_EN (1<<7) +#define SWF14_DS_PIPEA_EFP2_EN (1<<6) +#define SWF14_DS_PIPEA_TV2_EN (1<<5) +#define SWF14_DS_PIPEA_CRT2_EN (1<<4) +#define SWF14_DS_PIPEA_LFP_EN (1<<3) +#define SWF14_DS_PIPEA_EFP_EN (1<<2) +#define SWF14_DS_PIPEA_TV_EN (1<<1) +#define SWF14_DS_PIPEA_CRT_EN (1<<0) + /* if GR18 indicates a panel fitting request */ +#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ + /* if GR18 indicates an APM change request */ +#define SWF14_APM_HIBERNATE 0x4 +#define SWF14_APM_SUSPEND 0x3 +#define SWF14_APM_STANDBY 0x1 +#define SWF14_APM_RESTORE 0x0 + +/* Add the device class for LFP, TV, HDMI */ +#define DEVICE_TYPE_INT_LFP 0x1022 +#define DEVICE_TYPE_INT_TV 0x1009 +#define DEVICE_TYPE_HDMI 0x60D2 +#define DEVICE_TYPE_DP 0x68C6 +#define DEVICE_TYPE_eDP 0x78C6 + +/* define the DVO port for HDMI output type */ +#define DVO_B 1 +#define DVO_C 2 +#define DVO_D 3 + +/* define the PORT for DP output type */ +#define PORT_IDPB 7 +#define PORT_IDPC 8 +#define PORT_IDPD 9 + +#endif /* _I830_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c new file mode 100644 index 0000000..58b4a53 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -0,0 +1,809 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
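The GR18 and SWF1x definitions that close intel_bios.h above describe a small mailbox protocol: the VBIOS raises a hotkey event in GR18 bits 6:3 and, for a display-switch request, lists the desired pipe/output combinations in SWF14 bits 15:0. A minimal sketch of how a driver-side consumer could decode such a request using only those defines; hotkey_is_display_switch() is an illustrative helper, and the handshake interpretation is inferred from the comments on the defines, not taken from driver code:

/* Assumes <linux/types.h> and the GR18_ / SWF14_ bits from intel_bios.h. */
static bool hotkey_is_display_switch(u32 gr18, u32 swf14, u16 *request)
{
        if (!(gr18 & GR18_DRIVER_SWITCH_EN))
                return false;           /* VBIOS, not the driver, owns switching */

        if ((gr18 & GR18_HOTKEY_MASK) != GR18_HK_DISP_SWITCH)
                return false;           /* some other (or no) hotkey event */

        *request = swf14 & SWF14_HK_REQUEST_MASK;   /* pipe/output enable bits */
        return true;
}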
+ * + * Authors: + * Eric Anholt + */ + +#include +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +/* Here's the desired hotplug mode */ +#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ + ADPA_CRT_HOTPLUG_WARMUP_10MS | \ + ADPA_CRT_HOTPLUG_SAMPLE_4S | \ + ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ + ADPA_CRT_HOTPLUG_VOLREF_325MV | \ + ADPA_CRT_HOTPLUG_ENABLE) + +struct intel_crt { + struct intel_encoder base; + /* DPMS state is stored in the connector, which we need in the + * encoder's enable/disable callbacks */ + struct intel_connector *connector; + bool force_hotplug_required; + u32 adpa_reg; +}; + +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_crt, base); +} + +static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) +{ + return container_of(encoder, struct intel_crt, base); +} + +static bool intel_crt_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 tmp; + + tmp = I915_READ(crt->adpa_reg); + + if (!(tmp & ADPA_DAC_ENABLE)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +/* Note: The caller is required to filter out dpms modes not supported by the + * platform. */ +static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 temp; + + temp = I915_READ(crt->adpa_reg); + temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + temp &= ~ADPA_DAC_ENABLE; + + switch (mode) { + case DRM_MODE_DPMS_ON: + temp |= ADPA_DAC_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; + break; + case DRM_MODE_DPMS_SUSPEND: + temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; + break; + case DRM_MODE_DPMS_OFF: + temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; + break; + } + + I915_WRITE(crt->adpa_reg, temp); +} + +static void intel_disable_crt(struct intel_encoder *encoder) +{ + intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void intel_enable_crt(struct intel_encoder *encoder) +{ + struct intel_crt *crt = intel_encoder_to_crt(encoder); + + intel_crt_set_dpms(encoder, crt->connector->base.dpms); +} + + +static void intel_crt_dpms(struct drm_connector *connector, int mode) +{ + struct drm_device *dev = connector->dev; + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct drm_crtc *crtc; + int old_dpms; + + /* PCH platforms and VLV only support on/off. */ + if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + old_dpms = connector->dpms; + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = encoder->base.crtc; + if (!crtc) { + encoder->connectors_active = false; + return; + } + + /* We need the pipe to run for anything but OFF. */ + if (mode == DRM_MODE_DPMS_OFF) + encoder->connectors_active = false; + else + encoder->connectors_active = true; + + if (mode < old_dpms) { + /* From off to on, enable the pipe first. 
*/ + intel_crtc_update_dpms(crtc); + + intel_crt_set_dpms(encoder, mode); + } else { + intel_crt_set_dpms(encoder, mode); + + intel_crtc_update_dpms(crtc); + } + + intel_modeset_check_state(connector->dev); +} + +static int intel_crt_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + + int max_clock = 0; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (mode->clock < 25000) + return MODE_CLOCK_LOW; + + if (IS_GEN2(dev)) + max_clock = 350000; + else + max_clock = 400000; + if (mode->clock > max_clock) + return MODE_CLOCK_HIGH; + + /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */ + if (HAS_PCH_LPT(dev) && + (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static bool intel_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = encoder->base.dev; + + if (HAS_PCH_SPLIT(dev)) + pipe_config->has_pch_encoder = true; + + return true; +} + +static void intel_crt_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crt *crt = + intel_encoder_to_crt(to_intel_encoder(encoder)); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 adpa; + + if (HAS_PCH_SPLIT(dev)) + adpa = ADPA_HOTPLUG_BITS; + else + adpa = 0; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + adpa |= ADPA_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + adpa |= ADPA_VSYNC_ACTIVE_HIGH; + + /* For CPT allow 3 pipe config, for others just use A or B */ + if (HAS_PCH_LPT(dev)) + ; /* Those bits don't exist here */ + else if (HAS_PCH_CPT(dev)) + adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); + else if (intel_crtc->pipe == 0) + adpa |= ADPA_PIPE_A_SELECT; + else + adpa |= ADPA_PIPE_B_SELECT; + + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); + + I915_WRITE(crt->adpa_reg, adpa); +} + +static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 adpa; + bool ret; + + /* The first time through, trigger an explicit detection cycle */ + if (crt->force_hotplug_required) { + bool turn_off_dac = HAS_PCH_SPLIT(dev); + u32 save_adpa; + + crt->force_hotplug_required = 0; + + save_adpa = adpa = I915_READ(crt->adpa_reg); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + if (turn_off_dac) + adpa &= ~ADPA_DAC_ENABLE; + + I915_WRITE(crt->adpa_reg, adpa); + + if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + + if (turn_off_dac) { + I915_WRITE(crt->adpa_reg, save_adpa); + POSTING_READ(crt->adpa_reg); + } + } + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(crt->adpa_reg); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) + ret = true; + else + ret = false; + DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); + + return ret; +} + +static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; 
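The Ironlake helper above and the ValleyView helper that starts here share one idiom: latch the current ADPA value, set the FORCE_TRIGGER bit to start a detection cycle, wait for the hardware to clear that bit, restore the register, and then read the monitor-presence bits. A condensed sketch of that pattern, assuming the driver's I915_READ/I915_WRITE, POSTING_READ, wait_for() and ADPA_* definitions; crt_force_hotplug_sample() is an illustrative name only:

static bool crt_force_hotplug_sample(struct drm_i915_private *dev_priv,
                                     u32 adpa_reg)
{
        u32 save_adpa = I915_READ(adpa_reg);

        /* Kick off one detection cycle; hardware clears the bit when done. */
        I915_WRITE(adpa_reg, save_adpa | ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
        if (wait_for((I915_READ(adpa_reg) &
                      ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 1000))
                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER\n");

        I915_WRITE(adpa_reg, save_adpa);        /* restore the original state */
        POSTING_READ(adpa_reg);

        /* Non-zero monitor bits mean the DAC saw a load on the sense lines. */
        return (I915_READ(adpa_reg) & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0;
}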
+ struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 adpa; + bool ret; + u32 save_adpa; + + save_adpa = adpa = I915_READ(crt->adpa_reg); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + + I915_WRITE(crt->adpa_reg, adpa); + + if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000)) { + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + I915_WRITE(crt->adpa_reg, save_adpa); + } + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(crt->adpa_reg); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) + ret = true; + else + ret = false; + + DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); + + /* FIXME: debug force function and remove */ + ret = true; + + return ret; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. + * + * Not for i915G/i915GM + * + * \return true if CRT is connected. + * \return false if CRT is disconnected. + */ +static bool intel_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 hotplug_en, orig, stat; + bool ret = false; + int i, tries = 0; + + if (HAS_PCH_SPLIT(dev)) + return intel_ironlake_crt_detect_hotplug(connector); + + if (IS_VALLEYVIEW(dev)) + return valleyview_crt_detect_hotplug(connector); + + /* + * On 4 series desktop, CRT detect sequence need to be done twice + * to get a reliable result. + */ + + if (IS_G4X(dev) && !IS_GM45(dev)) + tries = 2; + else + tries = 1; + hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); + hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; + + for (i = 0; i < tries ; i++) { + /* turn on the FORCE_DETECT */ + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); + /* wait for FORCE_DETECT to go off */ + if (wait_for((I915_READ(PORT_HOTPLUG_EN) & + CRT_HOTPLUG_FORCE_DETECT) == 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); + } + + stat = I915_READ(PORT_HOTPLUG_STAT); + if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) + ret = true; + + /* clear the interrupt we just generated, if any */ + I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); + + /* and put the bits back */ + I915_WRITE(PORT_HOTPLUG_EN, orig); + + return ret; +} + +static struct edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) +{ + struct edid *edid; + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } + + return edid; +} + +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + int ret; + + edid = intel_crt_get_edid(connector, adapter); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static bool intel_crt_detect_ddc(struct drm_connector *connector) +{ + struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; + struct edid *edid; + struct i2c_adapter *i2c; + + BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); + + i2c = intel_gmbus_get_adapter(dev_priv, 
dev_priv->crt_ddc_pin); + edid = intel_crt_get_edid(connector, i2c); + + if (edid) { + bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; + + /* + * This may be a DVI-I connector with a shared DDC + * link between analog and digital outputs, so we + * have to check the EDID input spec of the attached device. + */ + if (!is_digital) { + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); + return true; + } + + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); + } + + kfree(edid); + + return false; +} + +static enum drm_connector_status +intel_crt_load_detect(struct intel_crt *crt) +{ + struct drm_device *dev = crt->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe; + uint32_t save_bclrpat; + uint32_t save_vtotal; + uint32_t vtotal, vactive; + uint32_t vsample; + uint32_t vblank, vblank_start, vblank_end; + uint32_t dsl; + uint32_t bclrpat_reg; + uint32_t vtotal_reg; + uint32_t vblank_reg; + uint32_t vsync_reg; + uint32_t pipeconf_reg; + uint32_t pipe_dsl_reg; + uint8_t st00; + enum drm_connector_status status; + + DRM_DEBUG_KMS("starting load-detect on CRT\n"); + + bclrpat_reg = BCLRPAT(pipe); + vtotal_reg = VTOTAL(pipe); + vblank_reg = VBLANK(pipe); + vsync_reg = VSYNC(pipe); + pipeconf_reg = PIPECONF(pipe); + pipe_dsl_reg = PIPEDSL(pipe); + + save_bclrpat = I915_READ(bclrpat_reg); + save_vtotal = I915_READ(vtotal_reg); + vblank = I915_READ(vblank_reg); + + vtotal = ((save_vtotal >> 16) & 0xfff) + 1; + vactive = (save_vtotal & 0x7ff) + 1; + + vblank_start = (vblank & 0xfff) + 1; + vblank_end = ((vblank >> 16) & 0xfff) + 1; + + /* Set the border color to purple. */ + I915_WRITE(bclrpat_reg, 0x500050); + + if (!IS_GEN2(dev)) { + uint32_t pipeconf = I915_READ(pipeconf_reg); + I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); + POSTING_READ(pipeconf_reg); + /* Wait for next Vblank to substitue + * border color for Color info */ + intel_wait_for_vblank(dev, pipe); + st00 = I915_READ8(VGA_MSR_WRITE); + status = ((st00 & (1 << 4)) != 0) ? + connector_status_connected : + connector_status_disconnected; + + I915_WRITE(pipeconf_reg, pipeconf); + } else { + bool restore_vblank = false; + int count, detect; + + /* + * If there isn't any border, add some. + * Yes, this will flicker + */ + if (vblank_start <= vactive && vblank_end >= vtotal) { + uint32_t vsync = I915_READ(vsync_reg); + uint32_t vsync_start = (vsync & 0xffff) + 1; + + vblank_start = vsync_start; + I915_WRITE(vblank_reg, + (vblank_start - 1) | + ((vblank_end - 1) << 16)); + restore_vblank = true; + } + /* sample in the vertical border, selecting the larger one */ + if (vblank_start - vactive >= vtotal - vblank_end) + vsample = (vblank_start + vactive) >> 1; + else + vsample = (vtotal + vblank_end) >> 1; + + /* + * Wait for the border to be displayed + */ + while (I915_READ(pipe_dsl_reg) >= vactive) + ; + while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample) + ; + /* + * Watch ST00 for an entire scanline + */ + detect = 0; + count = 0; + do { + count++; + /* Read the ST00 VGA status register */ + st00 = I915_READ8(VGA_MSR_WRITE); + if (st00 & (1 << 4)) + detect++; + } while ((I915_READ(pipe_dsl_reg) == dsl)); + + /* restore vblank if necessary */ + if (restore_vblank) + I915_WRITE(vblank_reg, vblank); + /* + * If more than 3/4 of the scanline detected a monitor, + * then it is assumed to be present. 
This works even on i830, + * where there isn't any way to force the border color across + * the screen + */ + status = detect * 4 > count * 3 ? + connector_status_connected : + connector_status_disconnected; + } + + /* Restore previous settings */ + I915_WRITE(bclrpat_reg, save_bclrpat); + + return status; +} + +static enum drm_connector_status +intel_crt_detect(struct drm_connector *connector, bool force) +{ + struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); + enum drm_connector_status status; + struct intel_load_detect_pipe tmp; + + if (I915_HAS_HOTPLUG(dev)) { + /* We can not rely on the HPD pin always being correctly wired + * up, for example many KVM do not pass it through, and so + * only trust an assertion that the monitor is connected. + */ + if (intel_crt_detect_hotplug(connector)) { + DRM_DEBUG_KMS("CRT detected via hotplug\n"); + return connector_status_connected; + } else + DRM_DEBUG_KMS("CRT not detected via hotplug\n"); + } + + if (intel_crt_detect_ddc(connector)) + return connector_status_connected; + + /* Load detection is broken on HPD capable machines. Whoever wants a + * broken monitor (without edid) to work behind a broken kvm (that fails + * to have the right resistors for HP detection) needs to fix this up. + * For now just bail out. */ + if (I915_HAS_HOTPLUG(dev)) + return connector_status_disconnected; + + if (!force) + return connector->status; + + /* for pre-945g platforms use load detect */ + if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { + if (intel_crt_detect_ddc(connector)) + status = connector_status_connected; + else + status = intel_crt_load_detect(crt); + intel_release_load_detect_pipe(connector, &tmp); + } else + status = connector_status_unknown; + + return status; +} + +static void intel_crt_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static int intel_crt_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + struct i2c_adapter *i2c; + + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + ret = intel_crt_ddc_get_modes(connector, i2c); + if (ret || !IS_G4X(dev)) + return ret; + + /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ + i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); + return intel_crt_ddc_get_modes(connector, i2c); +} + +static int intel_crt_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ + return 0; +} + +static void intel_crt_reset(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_attached_crt(connector); + + if (HAS_PCH_SPLIT(dev)) { + u32 adpa; + + adpa = I915_READ(crt->adpa_reg); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(crt->adpa_reg, adpa); + POSTING_READ(crt->adpa_reg); + + DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } + +} + +/* + * Routines for controlling stuff on the analog port + */ + +static const struct drm_encoder_helper_funcs crt_encoder_funcs = { + .mode_set = intel_crt_mode_set, +}; + +static const struct drm_connector_funcs intel_crt_connector_funcs = { + .reset = intel_crt_reset, + .dpms = intel_crt_dpms, + .detect = intel_crt_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_crt_destroy, + .set_property = intel_crt_set_property, +}; + +static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { + .mode_valid = intel_crt_mode_valid, + .get_modes = intel_crt_get_modes, + .best_encoder = intel_best_encoder, +}; + +static const struct drm_encoder_funcs intel_crt_enc_funcs = { + .destroy = intel_encoder_destroy, +}; + +static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Skipping CRT initialization for %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_crt[] = { + { + .callback = intel_no_crt_dmi_callback, + .ident = "ACER ZGB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), + }, + }, + { } +}; + +void intel_crt_init(struct drm_device *dev) +{ + struct drm_connector *connector; + struct intel_crt *crt; + struct intel_connector *intel_connector; + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Skip machines without VGA that falsely report hotplug events */ + if (dmi_check_system(intel_no_crt)) + return; + + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); + if (!crt) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(crt); + return; + } + + connector = &intel_connector->base; + crt->connector = intel_connector; + drm_connector_init(dev, &intel_connector->base, + &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); + + drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, + DRM_MODE_ENCODER_DAC); + + intel_connector_attach_encoder(intel_connector, &crt->base); + + crt->base.type = INTEL_OUTPUT_ANALOG; + crt->base.cloneable = true; + if (IS_I830(dev)) + crt->base.crtc_mask = (1 << 0); + else + crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + if (IS_GEN2(dev)) + connector->interlace_allowed = 0; + else + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; + + if (HAS_PCH_SPLIT(dev)) + crt->adpa_reg = PCH_ADPA; + else if (IS_VALLEYVIEW(dev)) + crt->adpa_reg = VLV_ADPA; + else + crt->adpa_reg = ADPA; + + crt->base.compute_config = intel_crt_compute_config; + crt->base.disable = intel_disable_crt; + crt->base.enable = intel_enable_crt; + if (I915_HAS_HOTPLUG(dev)) + crt->base.hpd_pin = HPD_CRT; + if (HAS_DDI(dev)) + crt->base.get_hw_state = 
intel_ddi_get_hw_state; + else + crt->base.get_hw_state = intel_crt_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + + drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); + drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); + + drm_sysfs_connector_add(connector); + + if (!I915_HAS_HOTPLUG(dev)) + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + /* + * Configure the automatic hotplug detection stuff + */ + crt->force_hotplug_required = 0; + + /* + * TODO: find a proper way to discover whether we need to set the the + * polarity and link reversal bits or not, instead of relying on the + * BIOS. + */ + if (HAS_PCH_LPT(dev)) { + u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | + FDI_RX_LINK_REVERSAL_OVERRIDE; + + dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; + } +} diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c new file mode 100644 index 0000000..16e674a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -0,0 +1,1562 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eugeni Dodonov + * + */ + +#include "i915_drv.h" +#include "intel_drv.h" + +/* HDMI/DVI modes ignore everything but the last 2 items. 
So we share + * them for both DP and FDI transports, allowing those ports to + * automatically adapt to HDMI connections as well + */ +static const u32 hsw_ddi_translations_dp[] = { + 0x00FFFFFF, 0x0006000E, /* DP parameters */ + 0x00D75FFF, 0x0005000A, + 0x00C30FFF, 0x00040006, + 0x80AAAFFF, 0x000B0000, + 0x00FFFFFF, 0x0005000A, + 0x00D75FFF, 0x000C0004, + 0x80C30FFF, 0x000B0000, + 0x00FFFFFF, 0x00040006, + 0x80D75FFF, 0x000B0000, + 0x00FFFFFF, 0x00040006 /* HDMI parameters */ +}; + +static const u32 hsw_ddi_translations_fdi[] = { + 0x00FFFFFF, 0x0007000E, /* FDI parameters */ + 0x00D75FFF, 0x000F000A, + 0x00C30FFF, 0x00060006, + 0x00AAAFFF, 0x001E0000, + 0x00FFFFFF, 0x000F000A, + 0x00D75FFF, 0x00160004, + 0x00C30FFF, 0x001E0000, + 0x00FFFFFF, 0x00060006, + 0x00D75FFF, 0x001E0000, + 0x00FFFFFF, 0x00040006 /* HDMI parameters */ +}; + +static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + int type = intel_encoder->type; + + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || + type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(encoder); + return intel_dig_port->port; + + } else if (type == INTEL_OUTPUT_ANALOG) { + return PORT_E; + + } else { + DRM_ERROR("Invalid DDI encoder type %d\n", type); + BUG(); + } +} + +/* On Haswell, DDI port buffers must be programmed with correct values + * in advance. The buffer values are different for FDI and DP modes, + * but the HDMI/DVI fields are shared among those. So we program the DDI + * in either FDI or DP modes only, as HDMI connections will work with both + * of those + */ +static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, + bool use_fdi_mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + int i; + const u32 *ddi_translations = ((use_fdi_mode) ? + hsw_ddi_translations_fdi : + hsw_ddi_translations_dp); + + DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n", + port_name(port), + use_fdi_mode ? "FDI" : "DP"); + + WARN((use_fdi_mode && (port != PORT_E)), + "Programming port %c in FDI mode, this probably will not work.\n", + port_name(port)); + + for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { + I915_WRITE(reg, ddi_translations[i]); + reg += 4; + } +} + +/* Program DDI buffers translations for DP. By default, program ports A-D in DP + * mode and port E for FDI. + */ +void intel_prepare_ddi(struct drm_device *dev) +{ + int port; + + if (!HAS_DDI(dev)) + return; + + for (port = PORT_A; port < PORT_E; port++) + intel_prepare_ddi_buffers(dev, port, false); + + /* DDI E is the suggested one to work in FDI mode, so program is as such + * by default. 
It will have to be re-programmed in case a digital DP + * output will be detected on it + */ + intel_prepare_ddi_buffers(dev, PORT_E, true); +} + +static const long hsw_ddi_buf_ctl_values[] = { + DDI_BUF_EMP_400MV_0DB_HSW, + DDI_BUF_EMP_400MV_3_5DB_HSW, + DDI_BUF_EMP_400MV_6DB_HSW, + DDI_BUF_EMP_400MV_9_5DB_HSW, + DDI_BUF_EMP_600MV_0DB_HSW, + DDI_BUF_EMP_600MV_3_5DB_HSW, + DDI_BUF_EMP_600MV_6DB_HSW, + DDI_BUF_EMP_800MV_0DB_HSW, + DDI_BUF_EMP_800MV_3_5DB_HSW +}; + +static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, + enum port port) +{ + uint32_t reg = DDI_BUF_CTL(port); + int i; + + for (i = 0; i < 8; i++) { + udelay(1); + if (I915_READ(reg) & DDI_BUF_IS_IDLE) + return; + } + DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); +} + +/* Starting with Haswell, different DDI ports can work in FDI mode for + * connection to the PCH-located connectors. For this, it is necessary to train + * both the DDI port and PCH receiver for the desired DDI buffer settings. + * + * The recommended port to work in FDI mode is DDI E, which we use here. Also, + * please note that when FDI mode is active on DDI E, it shares 2 lines with + * DDI A (which is used for eDP) + */ + +void hsw_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + u32 temp, i, rx_ctl_val; + + /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the + * mode set "sequence for CRT port" document: + * - TP1 to TP2 time with the default value + * - FDI delay to 90h + */ + I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | + FDI_RX_PWRDN_LANE0_VAL(2) | + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + + /* Enable the PCH Receiver FDI PLL */ + rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19); + I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); + POSTING_READ(_FDI_RXA_CTL); + udelay(220); + + /* Switch from Rawclk to PCDclk */ + rx_ctl_val |= FDI_PCDCLK; + I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); + + /* Configure Port Clock Select */ + I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); + + /* Start the training iterating through available voltages and emphasis, + * testing each value twice. */ + for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { + /* Configure DP_TP_CTL with auto-training */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | + DP_TP_CTL_ENABLE); + + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. 
+ * DDI E does not support port reversal, the functionality is + * achieved on the PCH side in FDI_RX_CTL, so no need to set the + * port reversal bit */ + I915_WRITE(DDI_BUF_CTL(PORT_E), + DDI_BUF_CTL_ENABLE | + ((intel_crtc->fdi_lanes - 1) << 1) | + hsw_ddi_buf_ctl_values[i / 2]); + POSTING_READ(DDI_BUF_CTL(PORT_E)); + + udelay(600); + + /* Program PCH FDI Receiver TU */ + I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); + + /* Enable PCH FDI Receiver with auto-training */ + rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; + I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); + POSTING_READ(_FDI_RXA_CTL); + + /* Wait for FDI receiver lane calibration */ + udelay(30); + + /* Unset FDI_RX_MISC pwrdn lanes */ + temp = I915_READ(_FDI_RXA_MISC); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + I915_WRITE(_FDI_RXA_MISC, temp); + POSTING_READ(_FDI_RXA_MISC); + + /* Wait for FDI auto training time */ + udelay(5); + + temp = I915_READ(DP_TP_STATUS(PORT_E)); + if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { + DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + + /* Enable normal pixel sending for FDI */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); + + return; + } + + temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(PORT_E), temp); + POSTING_READ(DDI_BUF_CTL(PORT_E)); + + /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ + temp = I915_READ(DP_TP_CTL(PORT_E)); + temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(PORT_E), temp); + POSTING_READ(DP_TP_CTL(PORT_E)); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + rx_ctl_val &= ~FDI_RX_ENABLE; + I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); + POSTING_READ(_FDI_RXA_CTL); + + /* Reset FDI_RX_MISC pwrdn lanes */ + temp = I915_READ(_FDI_RXA_MISC); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + I915_WRITE(_FDI_RXA_MISC, temp); + POSTING_READ(_FDI_RXA_MISC); + } + + DRM_ERROR("FDI link training failed!\n"); +} + +/* WRPLL clock dividers */ +struct wrpll_tmds_clock { + u32 clock; + u16 p; /* Post divider */ + u16 n2; /* Feedback divider */ + u16 r2; /* Reference divider */ +}; + +/* Table of matching values for WRPLL clocks programming for each frequency. + * The code assumes this table is sorted. 
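+ * Empirically the entries satisfy clock ~= 2700000 * n2 / (5 * r2 * p),
+ * in kHz: the 2.7 GHz LCPLL reference scaled by the feedback divider and
+ * divided down by the reference and post dividers. For example the
+ * 148.5 MHz entry {148500, 6, 33, 20} gives
+ * 2700000 * 33 / (5 * 20 * 6) = 148500.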
*/ +static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { + {19750, 38, 25, 18}, + {20000, 48, 32, 18}, + {21000, 36, 21, 15}, + {21912, 42, 29, 17}, + {22000, 36, 22, 15}, + {23000, 36, 23, 15}, + {23500, 40, 40, 23}, + {23750, 26, 16, 14}, + {24000, 36, 24, 15}, + {25000, 36, 25, 15}, + {25175, 26, 40, 33}, + {25200, 30, 21, 15}, + {26000, 36, 26, 15}, + {27000, 30, 21, 14}, + {27027, 18, 100, 111}, + {27500, 30, 29, 19}, + {28000, 34, 30, 17}, + {28320, 26, 30, 22}, + {28322, 32, 42, 25}, + {28750, 24, 23, 18}, + {29000, 30, 29, 18}, + {29750, 32, 30, 17}, + {30000, 30, 25, 15}, + {30750, 30, 41, 24}, + {31000, 30, 31, 18}, + {31500, 30, 28, 16}, + {32000, 30, 32, 18}, + {32500, 28, 32, 19}, + {33000, 24, 22, 15}, + {34000, 28, 30, 17}, + {35000, 26, 32, 19}, + {35500, 24, 30, 19}, + {36000, 26, 26, 15}, + {36750, 26, 46, 26}, + {37000, 24, 23, 14}, + {37762, 22, 40, 26}, + {37800, 20, 21, 15}, + {38000, 24, 27, 16}, + {38250, 24, 34, 20}, + {39000, 24, 26, 15}, + {40000, 24, 32, 18}, + {40500, 20, 21, 14}, + {40541, 22, 147, 89}, + {40750, 18, 19, 14}, + {41000, 16, 17, 14}, + {41500, 22, 44, 26}, + {41540, 22, 44, 26}, + {42000, 18, 21, 15}, + {42500, 22, 45, 26}, + {43000, 20, 43, 27}, + {43163, 20, 24, 15}, + {44000, 18, 22, 15}, + {44900, 20, 108, 65}, + {45000, 20, 25, 15}, + {45250, 20, 52, 31}, + {46000, 18, 23, 15}, + {46750, 20, 45, 26}, + {47000, 20, 40, 23}, + {48000, 18, 24, 15}, + {49000, 18, 49, 30}, + {49500, 16, 22, 15}, + {50000, 18, 25, 15}, + {50500, 18, 32, 19}, + {51000, 18, 34, 20}, + {52000, 18, 26, 15}, + {52406, 14, 34, 25}, + {53000, 16, 22, 14}, + {54000, 16, 24, 15}, + {54054, 16, 173, 108}, + {54500, 14, 24, 17}, + {55000, 12, 22, 18}, + {56000, 14, 45, 31}, + {56250, 16, 25, 15}, + {56750, 14, 25, 17}, + {57000, 16, 27, 16}, + {58000, 16, 43, 25}, + {58250, 16, 38, 22}, + {58750, 16, 40, 23}, + {59000, 14, 26, 17}, + {59341, 14, 40, 26}, + {59400, 16, 44, 25}, + {60000, 16, 32, 18}, + {60500, 12, 39, 29}, + {61000, 14, 49, 31}, + {62000, 14, 37, 23}, + {62250, 14, 42, 26}, + {63000, 12, 21, 15}, + {63500, 14, 28, 17}, + {64000, 12, 27, 19}, + {65000, 14, 32, 19}, + {65250, 12, 29, 20}, + {65500, 12, 32, 22}, + {66000, 12, 22, 15}, + {66667, 14, 38, 22}, + {66750, 10, 21, 17}, + {67000, 14, 33, 19}, + {67750, 14, 58, 33}, + {68000, 14, 30, 17}, + {68179, 14, 46, 26}, + {68250, 14, 46, 26}, + {69000, 12, 23, 15}, + {70000, 12, 28, 18}, + {71000, 12, 30, 19}, + {72000, 12, 24, 15}, + {73000, 10, 23, 17}, + {74000, 12, 23, 14}, + {74176, 8, 100, 91}, + {74250, 10, 22, 16}, + {74481, 12, 43, 26}, + {74500, 10, 29, 21}, + {75000, 12, 25, 15}, + {75250, 10, 39, 28}, + {76000, 12, 27, 16}, + {77000, 12, 53, 31}, + {78000, 12, 26, 15}, + {78750, 12, 28, 16}, + {79000, 10, 38, 26}, + {79500, 10, 28, 19}, + {80000, 12, 32, 18}, + {81000, 10, 21, 14}, + {81081, 6, 100, 111}, + {81624, 8, 29, 24}, + {82000, 8, 17, 14}, + {83000, 10, 40, 26}, + {83950, 10, 28, 18}, + {84000, 10, 28, 18}, + {84750, 6, 16, 17}, + {85000, 6, 17, 18}, + {85250, 10, 30, 19}, + {85750, 10, 27, 17}, + {86000, 10, 43, 27}, + {87000, 10, 29, 18}, + {88000, 10, 44, 27}, + {88500, 10, 41, 25}, + {89000, 10, 28, 17}, + {89012, 6, 90, 91}, + {89100, 10, 33, 20}, + {90000, 10, 25, 15}, + {91000, 10, 32, 19}, + {92000, 10, 46, 27}, + {93000, 10, 31, 18}, + {94000, 10, 40, 23}, + {94500, 10, 28, 16}, + {95000, 10, 44, 25}, + {95654, 10, 39, 22}, + {95750, 10, 39, 22}, + {96000, 10, 32, 18}, + {97000, 8, 23, 16}, + {97750, 8, 42, 29}, + {98000, 8, 45, 31}, + {99000, 8, 22, 15}, + {99750, 8, 
34, 23}, + {100000, 6, 20, 18}, + {100500, 6, 19, 17}, + {101000, 6, 37, 33}, + {101250, 8, 21, 14}, + {102000, 6, 17, 15}, + {102250, 6, 25, 22}, + {103000, 8, 29, 19}, + {104000, 8, 37, 24}, + {105000, 8, 28, 18}, + {106000, 8, 22, 14}, + {107000, 8, 46, 29}, + {107214, 8, 27, 17}, + {108000, 8, 24, 15}, + {108108, 8, 173, 108}, + {109000, 6, 23, 19}, + {110000, 6, 22, 18}, + {110013, 6, 22, 18}, + {110250, 8, 49, 30}, + {110500, 8, 36, 22}, + {111000, 8, 23, 14}, + {111264, 8, 150, 91}, + {111375, 8, 33, 20}, + {112000, 8, 63, 38}, + {112500, 8, 25, 15}, + {113100, 8, 57, 34}, + {113309, 8, 42, 25}, + {114000, 8, 27, 16}, + {115000, 6, 23, 18}, + {116000, 8, 43, 25}, + {117000, 8, 26, 15}, + {117500, 8, 40, 23}, + {118000, 6, 38, 29}, + {119000, 8, 30, 17}, + {119500, 8, 46, 26}, + {119651, 8, 39, 22}, + {120000, 8, 32, 18}, + {121000, 6, 39, 29}, + {121250, 6, 31, 23}, + {121750, 6, 23, 17}, + {122000, 6, 42, 31}, + {122614, 6, 30, 22}, + {123000, 6, 41, 30}, + {123379, 6, 37, 27}, + {124000, 6, 51, 37}, + {125000, 6, 25, 18}, + {125250, 4, 13, 14}, + {125750, 4, 27, 29}, + {126000, 6, 21, 15}, + {127000, 6, 24, 17}, + {127250, 6, 41, 29}, + {128000, 6, 27, 19}, + {129000, 6, 43, 30}, + {129859, 4, 25, 26}, + {130000, 6, 26, 18}, + {130250, 6, 42, 29}, + {131000, 6, 32, 22}, + {131500, 6, 38, 26}, + {131850, 6, 41, 28}, + {132000, 6, 22, 15}, + {132750, 6, 28, 19}, + {133000, 6, 34, 23}, + {133330, 6, 37, 25}, + {134000, 6, 61, 41}, + {135000, 6, 21, 14}, + {135250, 6, 167, 111}, + {136000, 6, 62, 41}, + {137000, 6, 35, 23}, + {138000, 6, 23, 15}, + {138500, 6, 40, 26}, + {138750, 6, 37, 24}, + {139000, 6, 34, 22}, + {139050, 6, 34, 22}, + {139054, 6, 34, 22}, + {140000, 6, 28, 18}, + {141000, 6, 36, 23}, + {141500, 6, 22, 14}, + {142000, 6, 30, 19}, + {143000, 6, 27, 17}, + {143472, 4, 17, 16}, + {144000, 6, 24, 15}, + {145000, 6, 29, 18}, + {146000, 6, 47, 29}, + {146250, 6, 26, 16}, + {147000, 6, 49, 30}, + {147891, 6, 23, 14}, + {148000, 6, 23, 14}, + {148250, 6, 28, 17}, + {148352, 4, 100, 91}, + {148500, 6, 33, 20}, + {149000, 6, 48, 29}, + {150000, 6, 25, 15}, + {151000, 4, 19, 17}, + {152000, 6, 27, 16}, + {152280, 6, 44, 26}, + {153000, 6, 34, 20}, + {154000, 6, 53, 31}, + {155000, 6, 31, 18}, + {155250, 6, 50, 29}, + {155750, 6, 45, 26}, + {156000, 6, 26, 15}, + {157000, 6, 61, 35}, + {157500, 6, 28, 16}, + {158000, 6, 65, 37}, + {158250, 6, 44, 25}, + {159000, 6, 53, 30}, + {159500, 6, 39, 22}, + {160000, 6, 32, 18}, + {161000, 4, 31, 26}, + {162000, 4, 18, 15}, + {162162, 4, 131, 109}, + {162500, 4, 53, 44}, + {163000, 4, 29, 24}, + {164000, 4, 17, 14}, + {165000, 4, 22, 18}, + {166000, 4, 32, 26}, + {167000, 4, 26, 21}, + {168000, 4, 46, 37}, + {169000, 4, 104, 83}, + {169128, 4, 64, 51}, + {169500, 4, 39, 31}, + {170000, 4, 34, 27}, + {171000, 4, 19, 15}, + {172000, 4, 51, 40}, + {172750, 4, 32, 25}, + {172800, 4, 32, 25}, + {173000, 4, 41, 32}, + {174000, 4, 49, 38}, + {174787, 4, 22, 17}, + {175000, 4, 35, 27}, + {176000, 4, 30, 23}, + {177000, 4, 38, 29}, + {178000, 4, 29, 22}, + {178500, 4, 37, 28}, + {179000, 4, 53, 40}, + {179500, 4, 73, 55}, + {180000, 4, 20, 15}, + {181000, 4, 55, 41}, + {182000, 4, 31, 23}, + {183000, 4, 42, 31}, + {184000, 4, 30, 22}, + {184750, 4, 26, 19}, + {185000, 4, 37, 27}, + {186000, 4, 51, 37}, + {187000, 4, 36, 26}, + {188000, 4, 32, 23}, + {189000, 4, 21, 15}, + {190000, 4, 38, 27}, + {190960, 4, 41, 29}, + {191000, 4, 41, 29}, + {192000, 4, 27, 19}, + {192250, 4, 37, 26}, + {193000, 4, 20, 14}, + {193250, 4, 53, 37}, + {194000, 4, 23, 
16}, + {194208, 4, 23, 16}, + {195000, 4, 26, 18}, + {196000, 4, 45, 31}, + {197000, 4, 35, 24}, + {197750, 4, 41, 28}, + {198000, 4, 22, 15}, + {198500, 4, 25, 17}, + {199000, 4, 28, 19}, + {200000, 4, 37, 25}, + {201000, 4, 61, 41}, + {202000, 4, 112, 75}, + {202500, 4, 21, 14}, + {203000, 4, 146, 97}, + {204000, 4, 62, 41}, + {204750, 4, 44, 29}, + {205000, 4, 38, 25}, + {206000, 4, 29, 19}, + {207000, 4, 23, 15}, + {207500, 4, 40, 26}, + {208000, 4, 37, 24}, + {208900, 4, 48, 31}, + {209000, 4, 48, 31}, + {209250, 4, 31, 20}, + {210000, 4, 28, 18}, + {211000, 4, 25, 16}, + {212000, 4, 22, 14}, + {213000, 4, 30, 19}, + {213750, 4, 38, 24}, + {214000, 4, 46, 29}, + {214750, 4, 35, 22}, + {215000, 4, 43, 27}, + {216000, 4, 24, 15}, + {217000, 4, 37, 23}, + {218000, 4, 42, 26}, + {218250, 4, 42, 26}, + {218750, 4, 34, 21}, + {219000, 4, 47, 29}, + {220000, 4, 44, 27}, + {220640, 4, 49, 30}, + {220750, 4, 36, 22}, + {221000, 4, 36, 22}, + {222000, 4, 23, 14}, + {222525, 4, 28, 17}, + {222750, 4, 33, 20}, + {227000, 4, 37, 22}, + {230250, 4, 29, 17}, + {233500, 4, 38, 22}, + {235000, 4, 40, 23}, + {238000, 4, 30, 17}, + {241500, 2, 17, 19}, + {245250, 2, 20, 22}, + {247750, 2, 22, 24}, + {253250, 2, 15, 16}, + {256250, 2, 18, 19}, + {262500, 2, 31, 32}, + {267250, 2, 66, 67}, + {268500, 2, 94, 95}, + {270000, 2, 14, 14}, + {272500, 2, 77, 76}, + {273750, 2, 57, 56}, + {280750, 2, 24, 23}, + {281250, 2, 23, 22}, + {286000, 2, 17, 16}, + {291750, 2, 26, 24}, + {296703, 2, 56, 51}, + {297000, 2, 22, 20}, + {298000, 2, 21, 19}, +}; + +static void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + int port = intel_ddi_get_encoder_port(intel_encoder); + int pipe = intel_crtc->pipe; + int type = intel_encoder->type; + + DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", + port_name(port), pipe_name(pipe)); + + intel_crtc->eld_vld = false; + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(encoder); + + intel_dp->DP = intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; + switch (intel_dp->lane_count) { + case 1: + intel_dp->DP |= DDI_PORT_WIDTH_X1; + break; + case 2: + intel_dp->DP |= DDI_PORT_WIDTH_X2; + break; + case 4: + intel_dp->DP |= DDI_PORT_WIDTH_X4; + break; + default: + intel_dp->DP |= DDI_PORT_WIDTH_X4; + WARN(1, "Unexpected DP lane count %d\n", + intel_dp->lane_count); + break; + } + + if (intel_dp->has_audio) { + DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", + pipe_name(intel_crtc->pipe)); + + /* write eld */ + DRM_DEBUG_DRIVER("DP audio: write eld information\n"); + intel_write_eld(encoder, adjusted_mode); + } + + intel_dp_init_link_config(intel_dp); + + } else if (type == INTEL_OUTPUT_HDMI) { + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + if (intel_hdmi->has_audio) { + /* Proper support for digital audio needs a new logic + * and a new set of registers, so we leave it for future + * patch bombing. 
+ */ + DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", + pipe_name(intel_crtc->pipe)); + + /* write eld */ + DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); + intel_write_eld(encoder, adjusted_mode); + } + + intel_hdmi->set_infoframes(encoder, adjusted_mode); + } +} + +static struct intel_encoder * +intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *intel_encoder, *ret = NULL; + int num_encoders = 0; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + ret = intel_encoder; + num_encoders++; + } + + if (num_encoders != 1) + WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, + intel_crtc->pipe); + + BUG_ON(ret == NULL); + return ret; +} + +void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_ddi_plls *plls = &dev_priv->ddi_plls; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t val; + + switch (intel_crtc->ddi_pll_sel) { + case PORT_CLK_SEL_SPLL: + plls->spll_refcount--; + if (plls->spll_refcount == 0) { + DRM_DEBUG_KMS("Disabling SPLL\n"); + val = I915_READ(SPLL_CTL); + WARN_ON(!(val & SPLL_PLL_ENABLE)); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + POSTING_READ(SPLL_CTL); + } + break; + case PORT_CLK_SEL_WRPLL1: + plls->wrpll1_refcount--; + if (plls->wrpll1_refcount == 0) { + DRM_DEBUG_KMS("Disabling WRPLL 1\n"); + val = I915_READ(WRPLL_CTL1); + WARN_ON(!(val & WRPLL_PLL_ENABLE)); + I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL1); + } + break; + case PORT_CLK_SEL_WRPLL2: + plls->wrpll2_refcount--; + if (plls->wrpll2_refcount == 0) { + DRM_DEBUG_KMS("Disabling WRPLL 2\n"); + val = I915_READ(WRPLL_CTL2); + WARN_ON(!(val & WRPLL_PLL_ENABLE)); + I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL2); + } + break; + } + + WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); + WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); + WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); + + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; +} + +static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) + if (clock <= wrpll_tmds_clock_table[i].clock) + break; + + if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) + i--; + + *p = wrpll_tmds_clock_table[i].p; + *n2 = wrpll_tmds_clock_table[i].n2; + *r2 = wrpll_tmds_clock_table[i].r2; + + if (wrpll_tmds_clock_table[i].clock != clock) + DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", + wrpll_tmds_clock_table[i].clock, clock); + + DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", + clock, *p, *n2, *r2); +} + +bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_ddi_plls *plls = &dev_priv->ddi_plls; + int type = intel_encoder->type; + enum pipe pipe = intel_crtc->pipe; + uint32_t reg, val; + + /* TODO: reuse PLLs when possible (compare values) */ + + intel_ddi_put_crtc_pll(crtc); + + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + switch (intel_dp->link_bw) { + case DP_LINK_BW_1_62: + 
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; + break; + case DP_LINK_BW_2_7: + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; + break; + case DP_LINK_BW_5_4: + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; + break; + default: + DRM_ERROR("Link bandwidth %d unsupported\n", + intel_dp->link_bw); + return false; + } + + /* We don't need to turn any PLL on because we'll use LCPLL. */ + return true; + + } else if (type == INTEL_OUTPUT_HDMI) { + int p, n2, r2; + + if (plls->wrpll1_refcount == 0) { + DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", + pipe_name(pipe)); + plls->wrpll1_refcount++; + reg = WRPLL_CTL1; + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; + } else if (plls->wrpll2_refcount == 0) { + DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", + pipe_name(pipe)); + plls->wrpll2_refcount++; + reg = WRPLL_CTL2; + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; + } else { + DRM_ERROR("No WRPLLs available!\n"); + return false; + } + + WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, + "WRPLL already enabled\n"); + + intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); + + val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | + WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p); + + } else if (type == INTEL_OUTPUT_ANALOG) { + if (plls->spll_refcount == 0) { + DRM_DEBUG_KMS("Using SPLL on pipe %c\n", + pipe_name(pipe)); + plls->spll_refcount++; + reg = SPLL_CTL; + intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; + } else { + DRM_ERROR("SPLL already in use\n"); + return false; + } + + WARN(I915_READ(reg) & SPLL_PLL_ENABLE, + "SPLL already enabled\n"); + + val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; + + } else { + WARN(1, "Invalid DDI encoder type %d\n", type); + return false; + } + + I915_WRITE(reg, val); + udelay(20); + + return true; +} + +void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + int type = intel_encoder->type; + uint32_t temp; + + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + + temp = TRANS_MSA_SYNC_CLK; + switch (intel_crtc->config.pipe_bpp) { + case 18: + temp |= TRANS_MSA_6_BPC; + break; + case 24: + temp |= TRANS_MSA_8_BPC; + break; + case 30: + temp |= TRANS_MSA_10_BPC; + break; + case 36: + temp |= TRANS_MSA_12_BPC; + break; + default: + BUG(); + } + I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); + } +} + +void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + enum pipe pipe = intel_crtc->pipe; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + enum port port = intel_ddi_get_encoder_port(intel_encoder); + int type = intel_encoder->type; + uint32_t temp; + + /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ + temp = TRANS_DDI_FUNC_ENABLE; + temp |= TRANS_DDI_SELECT_PORT(port); + + switch (intel_crtc->config.pipe_bpp) { + case 18: + temp |= TRANS_DDI_BPC_6; + break; + case 24: + temp |= TRANS_DDI_BPC_8; + break; + case 30: + temp |= TRANS_DDI_BPC_10; + break; + case 36: + temp |= TRANS_DDI_BPC_12; + break; + default: + BUG(); + } + + if (crtc->mode.flags & 
DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DDI_PVSYNC; + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DDI_PHSYNC; + + if (cpu_transcoder == TRANSCODER_EDP) { + switch (pipe) { + case PIPE_A: + /* Can only use the always-on power well for eDP when + * not using the panel fitter, and when not using motion + * blur mitigation (which we don't support). */ + if (dev_priv->pch_pf_size) + temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; + else + temp |= TRANS_DDI_EDP_INPUT_A_ON; + break; + case PIPE_B: + temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; + break; + case PIPE_C: + temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; + break; + default: + BUG(); + break; + } + } + + if (type == INTEL_OUTPUT_HDMI) { + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + if (intel_hdmi->has_hdmi_sink) + temp |= TRANS_DDI_MODE_SELECT_HDMI; + else + temp |= TRANS_DDI_MODE_SELECT_DVI; + + } else if (type == INTEL_OUTPUT_ANALOG) { + temp |= TRANS_DDI_MODE_SELECT_FDI; + temp |= (intel_crtc->fdi_lanes - 1) << 1; + + } else if (type == INTEL_OUTPUT_DISPLAYPORT || + type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + temp |= TRANS_DDI_MODE_SELECT_DP_SST; + + switch (intel_dp->lane_count) { + case 1: + temp |= TRANS_DDI_PORT_WIDTH_X1; + break; + case 2: + temp |= TRANS_DDI_PORT_WIDTH_X2; + break; + case 4: + temp |= TRANS_DDI_PORT_WIDTH_X4; + break; + default: + temp |= TRANS_DDI_PORT_WIDTH_X4; + WARN(1, "Unsupported lane count %d\n", + intel_dp->lane_count); + } + + } else { + WARN(1, "Invalid encoder type %d for pipe %d\n", + intel_encoder->type, pipe); + } + + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + +void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + uint32_t val = I915_READ(reg); + + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); + val |= TRANS_DDI_PORT_NONE; + I915_WRITE(reg, val); +} + +bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder = intel_connector->encoder; + int type = intel_connector->base.connector_type; + enum port port = intel_ddi_get_encoder_port(intel_encoder); + enum pipe pipe = 0; + enum transcoder cpu_transcoder; + uint32_t tmp; + + if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) + return false; + + if (port == PORT_A) + cpu_transcoder = TRANSCODER_EDP; + else + cpu_transcoder = (enum transcoder) pipe; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + + switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { + case TRANS_DDI_MODE_SELECT_HDMI: + case TRANS_DDI_MODE_SELECT_DVI: + return (type == DRM_MODE_CONNECTOR_HDMIA); + + case TRANS_DDI_MODE_SELECT_DP_SST: + if (type == DRM_MODE_CONNECTOR_eDP) + return true; + case TRANS_DDI_MODE_SELECT_DP_MST: + return (type == DRM_MODE_CONNECTOR_DisplayPort); + + case TRANS_DDI_MODE_SELECT_FDI: + return (type == DRM_MODE_CONNECTOR_VGA); + + default: + return false; + } +} + +bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_ddi_get_encoder_port(encoder); + u32 tmp; + int i; + + tmp = I915_READ(DDI_BUF_CTL(port)); + + if (!(tmp & DDI_BUF_CTL_ENABLE)) + return false; + + if (port == PORT_A) { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + + switch 
(tmp & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + case TRANS_DDI_EDP_INPUT_A_ONOFF: + *pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + *pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + *pipe = PIPE_C; + break; + } + + return true; + } else { + for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); + + if ((tmp & TRANS_DDI_PORT_MASK) + == TRANS_DDI_SELECT_PORT(port)) { + *pipe = i; + return true; + } + } + } + + DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); + + return false; +} + +static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + uint32_t temp, ret; + enum port port = I915_MAX_PORTS; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + int i; + + if (cpu_transcoder == TRANSCODER_EDP) { + port = PORT_A; + } else { + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + temp &= TRANS_DDI_PORT_MASK; + + for (i = PORT_B; i <= PORT_E; i++) + if (temp == TRANS_DDI_SELECT_PORT(i)) + port = i; + } + + if (port == I915_MAX_PORTS) { + WARN(1, "Pipe %c enabled on an unknown port\n", + pipe_name(pipe)); + ret = PORT_CLK_SEL_NONE; + } else { + ret = I915_READ(PORT_CLK_SEL(port)); + DRM_DEBUG_KMS("Pipe %c connected to port %c using clock " + "0x%08x\n", pipe_name(pipe), port_name(port), + ret); + } + + return ret; +} + +void intel_ddi_setup_hw_pll_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + struct intel_crtc *intel_crtc; + + for_each_pipe(pipe) { + intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + + if (!intel_crtc->active) + continue; + + intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, + pipe); + + switch (intel_crtc->ddi_pll_sel) { + case PORT_CLK_SEL_SPLL: + dev_priv->ddi_plls.spll_refcount++; + break; + case PORT_CLK_SEL_WRPLL1: + dev_priv->ddi_plls.wrpll1_refcount++; + break; + case PORT_CLK_SEL_WRPLL2: + dev_priv->ddi_plls.wrpll2_refcount++; + break; + } + } +} + +void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) +{ + struct drm_crtc *crtc = &intel_crtc->base; + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + enum port port = intel_ddi_get_encoder_port(intel_encoder); + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + + if (cpu_transcoder != TRANSCODER_EDP) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_PORT(port)); +} + +void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) +{ + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + + if (cpu_transcoder != TRANSCODER_EDP) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_DISABLED); +} + +static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_crtc *crtc = encoder->crtc; + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum port port = intel_ddi_get_encoder_port(intel_encoder); + int type = intel_encoder->type; + + if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + ironlake_edp_panel_vdd_on(intel_dp); + ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp, true); + } + + WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); + 
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel); + + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + if (port != PORT_A) + intel_dp_stop_link_train(intel_dp); + } +} + +static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + enum port port = intel_ddi_get_encoder_port(intel_encoder); + int type = intel_encoder->type; + uint32_t val; + bool wait = false; + + val = I915_READ(DDI_BUF_CTL(port)); + if (val & DDI_BUF_CTL_ENABLE) { + val &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), val); + wait = true; + } + + val = I915_READ(DP_TP_CTL(port)); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(port), val); + + if (wait) + intel_wait_ddi_buf_idle(dev_priv, port); + + if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + ironlake_edp_panel_vdd_on(intel_dp); + ironlake_edp_panel_off(intel_dp); + } + + I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); +} + +static void intel_enable_ddi(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_ddi_get_encoder_port(intel_encoder); + int type = intel_encoder->type; + uint32_t tmp; + + if (type == INTEL_OUTPUT_HDMI) { + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(encoder); + + /* In HDMI/DVI mode, the port width, and swing/emphasis values + * are ignored so nothing special needs to be done besides + * enabling the port. 
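+ * The swing/emphasis levels for HDMI/DVI come instead from the last
+ * pair of entries in the DDI_BUF_TRANS translation tables programmed by
+ * intel_prepare_ddi_buffers() at init time (see the tables at the top of
+ * this file).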
+ */ + I915_WRITE(DDI_BUF_CTL(port), + intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE); + } else if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (port == PORT_A) + intel_dp_stop_link_train(intel_dp); + + ironlake_edp_backlight_on(intel_dp); + } + + if (intel_crtc->eld_vld) { + tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); + I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + } +} + +static void intel_disable_ddi(struct intel_encoder *intel_encoder) +{ + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int type = intel_encoder->type; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); + I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + + if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + ironlake_edp_backlight_off(intel_dp); + } +} + +int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) +{ + if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) + return 450; + else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == + LCPLL_CLK_FREQ_450) + return 450; + else if (IS_ULT(dev_priv->dev)) + return 338; + else + return 540; +} + +void intel_ddi_pll_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t val = I915_READ(LCPLL_CTL); + + /* The LCPLL register should be turned on by the BIOS. For now let's + * just check its state and print errors in case something is wrong. + * Don't even try to turn it on. 
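+ * The checks below only verify that CDCLK is currently sourced from the
+ * LCPLL and that the PLL itself is not disabled.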
+ */ + + DRM_DEBUG_KMS("CDCLK running at %dMHz\n", + intel_ddi_get_cdclk_freq(dev_priv)); + + if (val & LCPLL_CD_SOURCE_FCLK) + DRM_ERROR("CDCLK source is not LCPLL\n"); + + if (val & LCPLL_PLL_DISABLE) + DRM_ERROR("LCPLL is disabled\n"); +} + +void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + enum port port = intel_dig_port->port; + uint32_t val; + bool wait = false; + + if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { + val = I915_READ(DDI_BUF_CTL(port)); + if (val & DDI_BUF_CTL_ENABLE) { + val &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), val); + wait = true; + } + + val = I915_READ(DP_TP_CTL(port)); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); + + if (wait) + intel_wait_ddi_buf_idle(dev_priv, port); + } + + val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | + DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) + val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); + + intel_dp->DP |= DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); + POSTING_READ(DDI_BUF_CTL(port)); + + udelay(600); +} + +void intel_ddi_fdi_disable(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + uint32_t val; + + intel_ddi_post_disable(intel_encoder); + + val = I915_READ(_FDI_RXA_CTL); + val &= ~FDI_RX_ENABLE; + I915_WRITE(_FDI_RXA_CTL, val); + + val = I915_READ(_FDI_RXA_MISC); + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + I915_WRITE(_FDI_RXA_MISC, val); + + val = I915_READ(_FDI_RXA_CTL); + val &= ~FDI_PCDCLK; + I915_WRITE(_FDI_RXA_CTL, val); + + val = I915_READ(_FDI_RXA_CTL); + val &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(_FDI_RXA_CTL, val); +} + +static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + int type = intel_encoder->type; + + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) + intel_dp_check_link_status(intel_dp); +} + +static void intel_ddi_destroy(struct drm_encoder *encoder) +{ + /* HDMI has nothing special to destroy, so we can go with this. 
*/ + intel_dp_encoder_destroy(encoder); +} + +static bool intel_ddi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + int type = encoder->type; + + WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); + + if (type == INTEL_OUTPUT_HDMI) + return intel_hdmi_compute_config(encoder, pipe_config); + else + return intel_dp_compute_config(encoder, pipe_config); +} + +static const struct drm_encoder_funcs intel_ddi_funcs = { + .destroy = intel_ddi_destroy, +}; + +static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { + .mode_set = intel_ddi_mode_set, +}; + +void intel_ddi_init(struct drm_device *dev, enum port port) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_digital_port *intel_dig_port; + struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; + struct intel_connector *hdmi_connector = NULL; + struct intel_connector *dp_connector = NULL; + + intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); + if (!intel_dig_port) + return; + + dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!dp_connector) { + kfree(intel_dig_port); + return; + } + + if (port != PORT_A) { + hdmi_connector = kzalloc(sizeof(struct intel_connector), + GFP_KERNEL); + if (!hdmi_connector) { + kfree(dp_connector); + kfree(intel_dig_port); + return; + } + } + + intel_encoder = &intel_dig_port->base; + encoder = &intel_encoder->base; + + drm_encoder_init(dev, encoder, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); + + intel_encoder->compute_config = intel_ddi_compute_config; + intel_encoder->enable = intel_enable_ddi; + intel_encoder->pre_enable = intel_ddi_pre_enable; + intel_encoder->disable = intel_disable_ddi; + intel_encoder->post_disable = intel_ddi_post_disable; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + + intel_dig_port->port = port; + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + (DDI_BUF_PORT_REVERSAL | + DDI_A_4_LANES); + if (hdmi_connector) + intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); + intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + + intel_encoder->type = INTEL_OUTPUT_UNKNOWN; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->cloneable = false; + intel_encoder->hot_plug = intel_ddi_hot_plug; + + if (hdmi_connector) + intel_hdmi_init_connector(intel_dig_port, hdmi_connector); + intel_dp_init_connector(intel_dig_port, dp_connector); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c new file mode 100644 index 0000000..ab95259 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_display.c @@ -0,0 +1,9664 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" +#include "i915_trace.h" +#include +#include +#include + +bool intel_pipe_has_type(struct drm_crtc *crtc, int type); +static void intel_increase_pllclock(struct drm_crtc *crtc); +static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); + +typedef struct { + /* given values */ + int n; + int m1, m2; + int p1, p2; + /* derived values */ + int dot; + int vco; + int m; + int p; +} intel_clock_t; + +typedef struct { + int min, max; +} intel_range_t; + +typedef struct { + int dot_limit; + int p2_slow, p2_fast; +} intel_p2_t; + +#define INTEL_P2_NUM 2 +typedef struct intel_limit intel_limit_t; +struct intel_limit { + intel_range_t dot, vco, n, m, m1, m2, p, p1; + intel_p2_t p2; + /** + * find_pll() - Find the best values for the PLL + * @limit: limits for the PLL + * @crtc: current CRTC + * @target: target frequency in kHz + * @refclk: reference clock frequency in kHz + * @match_clock: if provided, @best_clock P divider must + * match the P divider from @match_clock + * used for LVDS downclocking + * @best_clock: best PLL values found + * + * Returns true on success, false on failure. 
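+ *
+ * On success the chosen divisors are written to @best_clock. The
+ * platform-specific implementations (intel_find_best_PLL,
+ * intel_g4x_find_best_PLL, intel_vlv_find_best_pll and the DP-specific
+ * helpers) are declared further down in this file.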
+ */ + bool (*find_pll)(const intel_limit_t *limit, + struct drm_crtc *crtc, + int target, int refclk, + intel_clock_t *match_clock, + intel_clock_t *best_clock); +}; + +/* FDI */ +#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ + +int +intel_pch_rawclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(!HAS_PCH_SPLIT(dev)); + + return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; +} + +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); + +static bool +intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); +static bool +intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); + +static bool +intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); + +static inline u32 /* units of 100MHz */ +intel_fdi_link_freq(struct drm_device *dev) +{ + if (IS_GEN5(dev)) { + struct drm_i915_private *dev_priv = dev->dev_private; + return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + } else + return 27; +} + +static const intel_limit_t intel_limits_i8xx_dvo = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 930000, .max = 1400000 }, + .n = { .min = 3, .max = 16 }, + .m = { .min = 96, .max = 140 }, + .m1 = { .min = 18, .max = 26 }, + .m2 = { .min = 6, .max = 16 }, + .p = { .min = 4, .max = 128 }, + .p1 = { .min = 2, .max = 33 }, + .p2 = { .dot_limit = 165000, + .p2_slow = 4, .p2_fast = 2 }, + .find_pll = intel_find_best_PLL, +}; + +static const intel_limit_t intel_limits_i8xx_lvds = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 930000, .max = 1400000 }, + .n = { .min = 3, .max = 16 }, + .m = { .min = 96, .max = 140 }, + .m1 = { .min = 18, .max = 26 }, + .m2 = { .min = 6, .max = 16 }, + .p = { .min = 4, .max = 128 }, + .p1 = { .min = 1, .max = 6 }, + .p2 = { .dot_limit = 165000, + .p2_slow = 14, .p2_fast = 7 }, + .find_pll = intel_find_best_PLL, +}; + +static const intel_limit_t intel_limits_i9xx_sdvo = { + .dot = { .min = 20000, .max = 400000 }, + .vco = { .min = 1400000, .max = 2800000 }, + .n = { .min = 1, .max = 6 }, + .m = { .min = 70, .max = 120 }, + .m1 = { .min = 8, .max = 18 }, + .m2 = { .min = 3, .max = 7 }, + .p = { .min = 5, .max = 80 }, + .p1 = { .min = 1, .max = 8 }, + .p2 = { .dot_limit = 200000, + .p2_slow = 10, .p2_fast = 5 }, + .find_pll = intel_find_best_PLL, +}; + +static const intel_limit_t intel_limits_i9xx_lvds = { + .dot = { .min = 20000, .max = 400000 }, + .vco = { .min = 1400000, .max = 2800000 }, + .n = { .min = 1, .max = 6 }, + .m = { .min = 70, .max = 120 }, + .m1 = { .min = 8, .max = 18 }, + .m2 = { .min = 3, .max = 7 }, + .p = { .min = 7, .max = 98 }, + .p1 = { .min = 1, .max = 8 }, + .p2 = { .dot_limit = 112000, + .p2_slow = 14, .p2_fast = 7 }, + .find_pll = intel_find_best_PLL, +}; + + +static const intel_limit_t intel_limits_g4x_sdvo = { + .dot = { .min = 25000, .max = 270000 }, + .vco = { .min = 1750000, .max = 3500000}, + .n = { .min = 1, .max = 4 }, + .m = { .min = 104, .max = 138 }, + .m1 = { 
.min = 17, .max = 23 }, + .m2 = { .min = 5, .max = 11 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 1, .max = 3}, + .p2 = { .dot_limit = 270000, + .p2_slow = 10, + .p2_fast = 10 + }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_g4x_hdmi = { + .dot = { .min = 22000, .max = 400000 }, + .vco = { .min = 1750000, .max = 3500000}, + .n = { .min = 1, .max = 4 }, + .m = { .min = 104, .max = 138 }, + .m1 = { .min = 16, .max = 23 }, + .m2 = { .min = 5, .max = 11 }, + .p = { .min = 5, .max = 80 }, + .p1 = { .min = 1, .max = 8}, + .p2 = { .dot_limit = 165000, + .p2_slow = 10, .p2_fast = 5 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_g4x_single_channel_lvds = { + .dot = { .min = 20000, .max = 115000 }, + .vco = { .min = 1750000, .max = 3500000 }, + .n = { .min = 1, .max = 3 }, + .m = { .min = 104, .max = 138 }, + .m1 = { .min = 17, .max = 23 }, + .m2 = { .min = 5, .max = 11 }, + .p = { .min = 28, .max = 112 }, + .p1 = { .min = 2, .max = 8 }, + .p2 = { .dot_limit = 0, + .p2_slow = 14, .p2_fast = 14 + }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { + .dot = { .min = 80000, .max = 224000 }, + .vco = { .min = 1750000, .max = 3500000 }, + .n = { .min = 1, .max = 3 }, + .m = { .min = 104, .max = 138 }, + .m1 = { .min = 17, .max = 23 }, + .m2 = { .min = 5, .max = 11 }, + .p = { .min = 14, .max = 42 }, + .p1 = { .min = 2, .max = 6 }, + .p2 = { .dot_limit = 0, + .p2_slow = 7, .p2_fast = 7 + }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_g4x_display_port = { + .dot = { .min = 161670, .max = 227000 }, + .vco = { .min = 1750000, .max = 3500000}, + .n = { .min = 1, .max = 2 }, + .m = { .min = 97, .max = 108 }, + .m1 = { .min = 0x10, .max = 0x12 }, + .m2 = { .min = 0x05, .max = 0x06 }, + .p = { .min = 10, .max = 20 }, + .p1 = { .min = 1, .max = 2}, + .p2 = { .dot_limit = 0, + .p2_slow = 10, .p2_fast = 10 }, + .find_pll = intel_find_pll_g4x_dp, +}; + +static const intel_limit_t intel_limits_pineview_sdvo = { + .dot = { .min = 20000, .max = 400000}, + .vco = { .min = 1700000, .max = 3500000 }, + /* Pineview's Ncounter is a ring counter */ + .n = { .min = 3, .max = 6 }, + .m = { .min = 2, .max = 256 }, + /* Pineview only has one combined m divider, which we treat as m2. */ + .m1 = { .min = 0, .max = 0 }, + .m2 = { .min = 0, .max = 254 }, + .p = { .min = 5, .max = 80 }, + .p1 = { .min = 1, .max = 8 }, + .p2 = { .dot_limit = 200000, + .p2_slow = 10, .p2_fast = 5 }, + .find_pll = intel_find_best_PLL, +}; + +static const intel_limit_t intel_limits_pineview_lvds = { + .dot = { .min = 20000, .max = 400000 }, + .vco = { .min = 1700000, .max = 3500000 }, + .n = { .min = 3, .max = 6 }, + .m = { .min = 2, .max = 256 }, + .m1 = { .min = 0, .max = 0 }, + .m2 = { .min = 0, .max = 254 }, + .p = { .min = 7, .max = 112 }, + .p1 = { .min = 1, .max = 8 }, + .p2 = { .dot_limit = 112000, + .p2_slow = 14, .p2_fast = 14 }, + .find_pll = intel_find_best_PLL, +}; + +/* Ironlake / Sandybridge + * + * We calculate clock using (register_value + 2) for N/M1/M2, so here + * the range value for them is (actual_value - 2). 
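+ *
+ * As an illustration (example values only, following the arithmetic in
+ * intel_clock() below): a 120 MHz reference clock with register values
+ * n=2, m1=16, m2=7, p1=4, p2=10 gives m = 5*(16+2) + (7+2) = 99,
+ * vco = 120000 * 99 / (2+2) = 2970000 kHz and
+ * dot = vco / (p1*p2) = 74250 kHz, which falls inside the _dac limits.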
+ */ +static const intel_limit_t intel_limits_ironlake_dac = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000 }, + .n = { .min = 1, .max = 5 }, + .m = { .min = 79, .max = 127 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 5, .max = 80 }, + .p1 = { .min = 1, .max = 8 }, + .p2 = { .dot_limit = 225000, + .p2_slow = 10, .p2_fast = 5 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_single_lvds = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000 }, + .n = { .min = 1, .max = 3 }, + .m = { .min = 79, .max = 118 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 28, .max = 112 }, + .p1 = { .min = 2, .max = 8 }, + .p2 = { .dot_limit = 225000, + .p2_slow = 14, .p2_fast = 14 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000 }, + .n = { .min = 1, .max = 3 }, + .m = { .min = 79, .max = 127 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 14, .max = 56 }, + .p1 = { .min = 2, .max = 8 }, + .p2 = { .dot_limit = 225000, + .p2_slow = 7, .p2_fast = 7 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +/* LVDS 100mhz refclk limits. */ +static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000 }, + .n = { .min = 1, .max = 2 }, + .m = { .min = 79, .max = 126 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 28, .max = 112 }, + .p1 = { .min = 2, .max = 8 }, + .p2 = { .dot_limit = 225000, + .p2_slow = 14, .p2_fast = 14 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000 }, + .n = { .min = 1, .max = 3 }, + .m = { .min = 79, .max = 126 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 14, .max = 42 }, + .p1 = { .min = 2, .max = 6 }, + .p2 = { .dot_limit = 225000, + .p2_slow = 7, .p2_fast = 7 }, + .find_pll = intel_g4x_find_best_PLL, +}; + +static const intel_limit_t intel_limits_ironlake_display_port = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 1760000, .max = 3510000}, + .n = { .min = 1, .max = 2 }, + .m = { .min = 81, .max = 90 }, + .m1 = { .min = 12, .max = 22 }, + .m2 = { .min = 5, .max = 9 }, + .p = { .min = 10, .max = 20 }, + .p1 = { .min = 1, .max = 2}, + .p2 = { .dot_limit = 0, + .p2_slow = 10, .p2_fast = 10 }, + .find_pll = intel_find_pll_ironlake_dp, +}; + +static const intel_limit_t intel_limits_vlv_dac = { + .dot = { .min = 25000, .max = 270000 }, + .vco = { .min = 4000000, .max = 6000000 }, + .n = { .min = 1, .max = 7 }, + .m = { .min = 22, .max = 450 }, /* guess */ + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + +static const intel_limit_t intel_limits_vlv_hdmi = { + .dot = { .min = 20000, .max = 165000 }, + .vco = { .min = 4000000, .max = 5994000}, + .n = { .min = 1, .max = 7 }, + .m = { .min = 60, .max = 300 }, /* guess */ + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 
}, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + +static const intel_limit_t intel_limits_vlv_dp = { + .dot = { .min = 25000, .max = 270000 }, + .vco = { .min = 4000000, .max = 6000000 }, + .n = { .min = 1, .max = 7 }, + .m = { .min = 22, .max = 450 }, + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + +u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) +{ + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); + + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + return 0; + } + + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO read wait timed out\n"); + return 0; + } + + return I915_READ(DPIO_DATA); +} + +static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, + u32 val) +{ + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); + + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + return; + } + + I915_WRITE(DPIO_DATA, val); + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) + DRM_ERROR("DPIO write wait timed out\n"); +} + +static void vlv_init_dpio(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Reset the DPIO config */ + I915_WRITE(DPIO_CTL, 0); + POSTING_READ(DPIO_CTL); + I915_WRITE(DPIO_CTL, 1); + POSTING_READ(DPIO_CTL); +} + +static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, + int refclk) +{ + struct drm_device *dev = crtc->dev; + const intel_limit_t *limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_is_dual_link_lvds(dev)) { + if (refclk == 100000) + limit = &intel_limits_ironlake_dual_lvds_100m; + else + limit = &intel_limits_ironlake_dual_lvds; + } else { + if (refclk == 100000) + limit = &intel_limits_ironlake_single_lvds_100m; + else + limit = &intel_limits_ironlake_single_lvds; + } + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + limit = &intel_limits_ironlake_display_port; + else + limit = &intel_limits_ironlake_dac; + + return limit; +} + +static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + const intel_limit_t *limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_is_dual_link_lvds(dev)) + limit = &intel_limits_g4x_dual_channel_lvds; + else + limit = &intel_limits_g4x_single_channel_lvds; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { + limit = &intel_limits_g4x_hdmi; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { + limit = &intel_limits_g4x_sdvo; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + limit = &intel_limits_g4x_display_port; + } else /* The option is for other outputs */ + limit = &intel_limits_i9xx_sdvo; + + return limit; +} + +static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) +{ + struct drm_device *dev = 
crtc->dev; + const intel_limit_t *limit; + + if (HAS_PCH_SPLIT(dev)) + limit = intel_ironlake_limit(crtc, refclk); + else if (IS_G4X(dev)) { + limit = intel_g4x_limit(crtc); + } else if (IS_PINEVIEW(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits_pineview_lvds; + else + limit = &intel_limits_pineview_sdvo; + } else if (IS_VALLEYVIEW(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) + limit = &intel_limits_vlv_dac; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + limit = &intel_limits_vlv_hdmi; + else + limit = &intel_limits_vlv_dp; + } else if (!IS_GEN2(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits_i9xx_lvds; + else + limit = &intel_limits_i9xx_sdvo; + } else { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits_i8xx_lvds; + else + limit = &intel_limits_i8xx_dvo; + } + return limit; +} + +/* m1 is reserved as 0 in Pineview, n is a ring counter */ +static void pineview_clock(int refclk, intel_clock_t *clock) +{ + clock->m = clock->m2 + 2; + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / clock->n; + clock->dot = clock->vco / clock->p; +} + +static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) +{ + if (IS_PINEVIEW(dev)) { + pineview_clock(refclk, clock); + return; + } + clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); + clock->p = clock->p1 * clock->p2; + clock->vco = refclk * clock->m / (clock->n + 2); + clock->dot = clock->vco / clock->p; +} + +/** + * Returns whether any output on the specified pipe is of the specified type + */ +bool intel_pipe_has_type(struct drm_crtc *crtc, int type) +{ + struct drm_device *dev = crtc->dev; + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->type == type) + return true; + + return false; +} + +#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) +/** + * Returns whether the given set of divisors are valid for a given refclk with + * the given connectors. + */ + +static bool intel_PLL_is_valid(struct drm_device *dev, + const intel_limit_t *limit, + const intel_clock_t *clock) +{ + if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) + INTELPllInvalid("p1 out of range\n"); + if (clock->p < limit->p.min || limit->p.max < clock->p) + INTELPllInvalid("p out of range\n"); + if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) + INTELPllInvalid("m2 out of range\n"); + if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) + INTELPllInvalid("m1 out of range\n"); + if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) + INTELPllInvalid("m1 <= m2\n"); + if (clock->m < limit->m.min || limit->m.max < clock->m) + INTELPllInvalid("m out of range\n"); + if (clock->n < limit->n.min || limit->n.max < clock->n) + INTELPllInvalid("n out of range\n"); + if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) + INTELPllInvalid("vco out of range\n"); + /* XXX: We may need to be checking "Dot clock" depending on the multiplier, + * connector, etc., rather than just a single range. 
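+	 * (The dot clock tested below comes from intel_clock(): dot = vco / p,
+	 * with vco = refclk * m / (n + 2) on non-Pineview parts and
+	 * refclk * m / n on Pineview.)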
+ */ + if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) + INTELPllInvalid("dot out of range\n"); + + return true; +} + +static bool +intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) + +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + int err = target; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + /* + * For LVDS just rely on its current settings for dual-channel. + * We haven't figured out how to reliably set up different + * single/dual channel state, if we even can. + */ + if (intel_is_dual_link_lvds(dev)) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + + for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; + clock.m1++) { + for (clock.m2 = limit->m2.min; + clock.m2 <= limit->m2.max; clock.m2++) { + /* m1 is always 0 in Pineview */ + if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) + break; + for (clock.n = limit->n.min; + clock.n <= limit->n.max; clock.n++) { + for (clock.p1 = limit->p1.min; + clock.p1 <= limit->p1.max; clock.p1++) { + int this_err; + + intel_clock(dev, refclk, &clock); + if (!intel_PLL_is_valid(dev, limit, + &clock)) + continue; + if (match_clock && + clock.p != match_clock->p) + continue; + + this_err = abs(clock.dot - target); + if (this_err < err) { + *best_clock = clock; + err = this_err; + } + } + } + } + } + + return (err != target); +} + +static bool +intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + int max_n; + bool found; + /* approximately equals target * 0.00585 */ + int err_most = (target >> 8) + (target >> 9); + found = false; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + int lvds_reg; + + if (HAS_PCH_SPLIT(dev)) + lvds_reg = PCH_LVDS; + else + lvds_reg = LVDS; + if (intel_is_dual_link_lvds(dev)) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + max_n = limit->n.max; + /* based on hardware requirement, prefer smaller n to precision */ + for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { + /* based on hardware requirement, prefere larger m1,m2 */ + for (clock.m1 = limit->m1.max; + clock.m1 >= limit->m1.min; clock.m1--) { + for (clock.m2 = limit->m2.max; + clock.m2 >= limit->m2.min; clock.m2--) { + for (clock.p1 = limit->p1.max; + clock.p1 >= limit->p1.min; clock.p1--) { + int this_err; + + intel_clock(dev, refclk, &clock); + if (!intel_PLL_is_valid(dev, limit, + &clock)) + continue; + if (match_clock && + clock.p != match_clock->p) + continue; + + this_err = abs(clock.dot - target); + if (this_err < err_most) { + *best_clock = clock; + err_most = this_err; + max_n = clock.n; + found = true; + } + } + } + } + } + return found; +} + +static bool +intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + + if (target < 200000) { + clock.n = 1; + clock.p1 = 2; + clock.p2 = 
10; + clock.m1 = 12; + clock.m2 = 9; + } else { + clock.n = 2; + clock.p1 = 1; + clock.p2 = 10; + clock.m1 = 14; + clock.m2 = 8; + } + intel_clock(dev, refclk, &clock); + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} + +/* DisplayPort has only two frequencies, 162MHz and 270MHz */ +static bool +intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + intel_clock_t clock; + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 2; + clock.m1 = 23; + clock.m2 = 8; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 14; + clock.m2 = 2; + } + clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); + clock.p = (clock.p1 * clock.p2); + clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; + clock.vco = 0; + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} +static bool +intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; + u32 m, n, fastclk; + u32 updrate, minupdate, fracbits, p; + unsigned long bestppm, ppm, absppm; + int dotclk, flag; + + flag = 0; + dotclk = target * 1000; + bestppm = 1000000; + ppm = absppm = 0; + fastclk = dotclk / (2*100); + updrate = 0; + minupdate = 19200; + fracbits = 1; + n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; + bestm1 = bestm2 = bestp1 = bestp2 = 0; + + /* based on hardware requirement, prefer smaller n to precision */ + for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { + updrate = refclk / n; + for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { + for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { + if (p2 > 10) + p2 = p2 - 1; + p = p1 * p2; + /* based on hardware requirement, prefer bigger m1,m2 values */ + for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { + m2 = (((2*(fastclk * p * n / m1 )) + + refclk) / (2*refclk)); + m = m1 * m2; + vco = updrate * m; + if (vco >= limit->vco.min && vco < limit->vco.max) { + ppm = 1000000 * ((vco / p) - fastclk) / fastclk; + absppm = (ppm > 0) ? ppm : (-ppm); + if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { + bestppm = 0; + flag = 1; + } + if (absppm < bestppm - 10) { + bestppm = absppm; + flag = 1; + } + if (flag) { + bestn = n; + bestm1 = m1; + bestm2 = m2; + bestp1 = p1; + bestp2 = p2; + flag = 0; + } + } + } + } + } + } + best_clock->n = bestn; + best_clock->m1 = bestm1; + best_clock->m2 = bestm2; + best_clock->p1 = bestp1; + best_clock->p2 = bestp2; + + return true; +} + +enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + return intel_crtc->config.cpu_transcoder; +} + +static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 frame, frame_reg = PIPEFRAME(pipe); + + frame = I915_READ(frame_reg); + + if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) + DRM_DEBUG_KMS("vblank wait timed out\n"); +} + +/** + * intel_wait_for_vblank - wait for vblank on a given pipe + * @dev: drm device + * @pipe: pipe to wait for + * + * Wait for vblank to occur on a given pipe. Needed for various bits of + * mode setting code. 
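+ *
+ * On gen5+ this delegates to ironlake_wait_for_vblank(), which polls the
+ * pipe frame counter; older parts wait on the PIPESTAT vblank status bit
+ * as described below.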
+ */ +void intel_wait_for_vblank(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipestat_reg = PIPESTAT(pipe); + + if (INTEL_INFO(dev)->gen >= 5) { + ironlake_wait_for_vblank(dev, pipe); + return; + } + + /* Clear existing vblank status. Note this will clear any other + * sticky status fields as well. + * + * This races with i915_driver_irq_handler() with the result + * that either function could miss a vblank event. Here it is not + * fatal, as we will either wait upon the next vblank interrupt or + * timeout. Generally speaking intel_wait_for_vblank() is only + * called during modeset at which time the GPU should be idle and + * should *not* be performing page flips and thus not waiting on + * vblanks... + * Currently, the result of us stealing a vblank from the irq + * handler is that a single frame will be skipped during swapbuffers. + */ + I915_WRITE(pipestat_reg, + I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); + + /* Wait for vblank interrupt bit to set */ + if (wait_for(I915_READ(pipestat_reg) & + PIPE_VBLANK_INTERRUPT_STATUS, + 50)) + DRM_DEBUG_KMS("vblank wait timed out\n"); +} + +/* + * intel_wait_for_pipe_off - wait for pipe to turn off + * @dev: drm device + * @pipe: pipe to wait for + * + * After disabling a pipe, we can't wait for vblank in the usual way, + * spinning on the vblank interrupt status bit, since we won't actually + * see an interrupt when the pipe is disabled. + * + * On Gen4 and above: + * wait for the pipe register state bit to turn off + * + * Otherwise: + * wait for the display line value to settle (it usually + * ends up stopping at the start of the next frame). + * + */ +void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + + if (INTEL_INFO(dev)->gen >= 4) { + int reg = PIPECONF(cpu_transcoder); + + /* Wait for the Pipe State to go off */ + if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, + 100)) + WARN(1, "pipe_off wait timed out\n"); + } else { + u32 last_line, line_mask; + int reg = PIPEDSL(pipe); + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + if (IS_GEN2(dev)) + line_mask = DSL_LINEMASK_GEN2; + else + line_mask = DSL_LINEMASK_GEN3; + + /* Wait for the display line to settle */ + do { + last_line = I915_READ(reg) & line_mask; + mdelay(5); + } while (((I915_READ(reg) & line_mask) != last_line) && + time_after(timeout, jiffies)); + if (time_after(jiffies, timeout)) + WARN(1, "pipe_off wait timed out\n"); + } +} + +/* + * ibx_digital_port_connected - is the specified port connected? + * @dev_priv: i915 private structure + * @port: the port to test + * + * Returns true if @port is connected, false otherwise. + */ +bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) +{ + u32 bit; + + if (HAS_PCH_IBX(dev_priv->dev)) { + switch(port->port) { + case PORT_B: + bit = SDE_PORTB_HOTPLUG; + break; + case PORT_C: + bit = SDE_PORTC_HOTPLUG; + break; + case PORT_D: + bit = SDE_PORTD_HOTPLUG; + break; + default: + return true; + } + } else { + switch(port->port) { + case PORT_B: + bit = SDE_PORTB_HOTPLUG_CPT; + break; + case PORT_C: + bit = SDE_PORTC_HOTPLUG_CPT; + break; + case PORT_D: + bit = SDE_PORTD_HOTPLUG_CPT; + break; + default: + return true; + } + } + + return I915_READ(SDEISR) & bit; +} + +static const char *state_string(bool enabled) +{ + return enabled ? 
"on" : "off"; +} + +/* Only for pre-ILK configs */ +static void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = DPLL(pipe); + val = I915_READ(reg); + cur_state = !!(val & DPLL_VCO_ENABLE); + WARN(cur_state != state, + "PLL state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_pll_enabled(d, p) assert_pll(d, p, true) +#define assert_pll_disabled(d, p) assert_pll(d, p, false) + +/* For ILK+ */ +static void assert_pch_pll(struct drm_i915_private *dev_priv, + struct intel_pch_pll *pll, + struct intel_crtc *crtc, + bool state) +{ + u32 val; + bool cur_state; + + if (HAS_PCH_LPT(dev_priv->dev)) { + DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); + return; + } + + if (WARN (!pll, + "asserting PCH PLL %s with no PLL\n", state_string(state))) + return; + + val = I915_READ(pll->pll_reg); + cur_state = !!(val & DPLL_VCO_ENABLE); + WARN(cur_state != state, + "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n", + pll->pll_reg, state_string(state), state_string(cur_state), val); + + /* Make sure the selected PLL is correctly attached to the transcoder */ + if (crtc && HAS_PCH_CPT(dev_priv->dev)) { + u32 pch_dpll; + + pch_dpll = I915_READ(PCH_DPLL_SEL); + cur_state = pll->pll_reg == _PCH_DPLL_B; + if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state, + "PLL[%d] not attached to this transcoder %d: %08x\n", + cur_state, crtc->pipe, pch_dpll)) { + cur_state = !!(val >> (4*crtc->pipe + 3)); + WARN(cur_state != state, + "PLL[%d] not %s on this transcoder %d: %08x\n", + pll->pll_reg == _PCH_DPLL_B, + state_string(state), + crtc->pipe, + val); + } + } +} +#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) +#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) + +static void assert_fdi_tx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + + if (HAS_DDI(dev_priv->dev)) { + /* DDI does not have a specific FDI_TX register */ + reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + val = I915_READ(reg); + cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); + } else { + reg = FDI_TX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_TX_ENABLE); + } + WARN(cur_state != state, + "FDI TX state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) +#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) + +static void assert_fdi_rx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_RX_ENABLE); + WARN(cur_state != state, + "FDI RX state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) +#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) + +static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* ILK FDI PLL is always enabled */ + if (dev_priv->info->gen == 5) + return; + + /* On Haswell, DDI ports are responsible for the FDI PLL setup */ + if (HAS_DDI(dev_priv->dev)) + return; + + reg = FDI_TX_CTL(pipe); + val = 
I915_READ(reg); + WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); +} + +static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); +} + +static void assert_panel_unlocked(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int pp_reg, lvds_reg; + u32 val; + enum pipe panel_pipe = PIPE_A; + bool locked = true; + + if (HAS_PCH_SPLIT(dev_priv->dev)) { + pp_reg = PCH_PP_CONTROL; + lvds_reg = PCH_LVDS; + } else { + pp_reg = PP_CONTROL; + lvds_reg = LVDS; + } + + val = I915_READ(pp_reg); + if (!(val & PANEL_POWER_ON) || + ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) + locked = false; + + if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) + panel_pipe = PIPE_B; + + WARN(panel_pipe == pipe && locked, + "panel assertion failure, pipe %c regs locked\n", + pipe_name(pipe)); +} + +void assert_pipe(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + + /* if we need the pipe A quirk it must be always on */ + if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) + state = true; + + if (!intel_using_power_well(dev_priv->dev) && + cpu_transcoder != TRANSCODER_EDP) { + cur_state = false; + } else { + reg = PIPECONF(cpu_transcoder); + val = I915_READ(reg); + cur_state = !!(val & PIPECONF_ENABLE); + } + + WARN(cur_state != state, + "pipe %c assertion failure (expected %s, current %s)\n", + pipe_name(pipe), state_string(state), state_string(cur_state)); +} + +static void assert_plane(struct drm_i915_private *dev_priv, + enum plane plane, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = DSPCNTR(plane); + val = I915_READ(reg); + cur_state = !!(val & DISPLAY_PLANE_ENABLE); + WARN(cur_state != state, + "plane %c assertion failure (expected %s, current %s)\n", + plane_name(plane), state_string(state), state_string(cur_state)); +} + +#define assert_plane_enabled(d, p) assert_plane(d, p, true) +#define assert_plane_disabled(d, p) assert_plane(d, p, false) + +static void assert_planes_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg, i; + u32 val; + int cur_pipe; + + /* Planes are fixed to pipes on ILK+ */ + if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) { + reg = DSPCNTR(pipe); + val = I915_READ(reg); + WARN((val & DISPLAY_PLANE_ENABLE), + "plane %c assertion failure, should be disabled but not\n", + plane_name(pipe)); + return; + } + + /* Need to check both planes against the pipe */ + for (i = 0; i < 2; i++) { + reg = DSPCNTR(i); + val = I915_READ(reg); + cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> + DISPPLANE_SEL_PIPE_SHIFT; + WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, + "plane %c assertion failure, should be off on pipe %c but is still active\n", + plane_name(i), pipe_name(pipe)); + } +} + +static void assert_sprites_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg, i; + u32 val; + + if (!IS_VALLEYVIEW(dev_priv->dev)) + return; + + /* Need to check both planes against the pipe */ + for (i = 0; i < dev_priv->num_plane; i++) { + reg = SPCNTR(pipe, i); + val = I915_READ(reg); + WARN((val & SP_ENABLE), + "sprite %d assertion failure, should be off on pipe %c but is still active\n", + pipe * 2 + i, 
pipe_name(pipe)); + } +} + +static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) +{ + u32 val; + bool enabled; + + if (HAS_PCH_LPT(dev_priv->dev)) { + DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); + return; + } + + val = I915_READ(PCH_DREF_CONTROL); + enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | + DREF_SUPERSPREAD_SOURCE_MASK)); + WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); +} + +static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + bool enabled; + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + enabled = !!(val & TRANS_ENABLE); + WARN(enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); +} + +static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 port_sel, u32 val) +{ + if ((val & DP_PORT_EN) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); + u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); + if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) + return false; + } else { + if ((val & DP_PIPE_MASK) != (pipe << 30)) + return false; + } + return true; +} + +static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & SDVO_ENABLE) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) + return false; + } else { + if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) + return false; + } + return true; +} + +static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & LVDS_PORT_EN) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) + return false; + } else { + if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) + return false; + } + return true; +} + +static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & ADPA_DAC_ENABLE) == 0) + return false; + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) + return false; + } else { + if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) + return false; + } + return true; +} + +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg, u32 port_sel) +{ + u32 val = I915_READ(reg); + WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), + "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", + reg, pipe_name(pipe)); + + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 + && (val & DP_PIPEB_SELECT), + "IBX PCH dp port still using transcoder B\n"); +} + +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + WARN(hdmi_pipe_enabled(dev_priv, pipe, val), + "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", + reg, pipe_name(pipe)); + + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 + && (val & SDVO_PIPE_B_SELECT), + "IBX PCH hdmi port still using transcoder B\n"); +} + +static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); + 
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); + + reg = PCH_ADPA; + val = I915_READ(reg); + WARN(adpa_pipe_enabled(dev_priv, pipe, val), + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + reg = PCH_LVDS; + val = I915_READ(reg); + WARN(lvds_pipe_enabled(dev_priv, pipe, val), + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); +} + +/** + * intel_enable_pll - enable a PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * + * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to + * make sure the PLL reg is writable first though, since the panel write + * protect mechanism may be enabled. + * + * Note! This is for pre-ILK only. + * + * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. + */ +static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + int reg; + u32 val; + + /* No really, not for ILK+ */ + BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); + + /* PLL is protected by panel, make sure we can write it */ + if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) + assert_panel_unlocked(dev_priv, pipe); + + reg = DPLL(pipe); + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + + /* We do this three times for luck */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ +} + +/** + * intel_disable_pll - disable a PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to disable + * + * Disable the PLL for @pipe, making sure the pipe is off first. + * + * Note! This is for pre-ILK only. 
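+ *
+ * The pipe must already be disabled (asserted below), and pipe A is left
+ * untouched when the pipe A force quirk is active.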
+ */ +static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + int reg; + u32 val; + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + /* Make sure the pipe isn't still relying on us */ + assert_pipe_disabled(dev_priv, pipe); + + reg = DPLL(pipe); + val = I915_READ(reg); + val &= ~DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +/* SBI access */ +static void +intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, + enum intel_sbi_destination destination) +{ + u32 tmp; + + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); + + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to become ready\n"); + return; + } + + I915_WRITE(SBI_ADDR, (reg << 16)); + I915_WRITE(SBI_DATA, value); + + if (destination == SBI_ICLK) + tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR; + else + tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; + I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); + + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); + return; + } +} + +static u32 +intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, + enum intel_sbi_destination destination) +{ + u32 value = 0; + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); + + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to become ready\n"); + return 0; + } + + I915_WRITE(SBI_ADDR, (reg << 16)); + + if (destination == SBI_ICLK) + value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; + else + value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; + I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); + + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, + 100)) { + DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); + return 0; + } + + return I915_READ(SBI_DATA); +} + +/** + * ironlake_enable_pch_pll - enable PCH PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * + * The PCH PLL needs to be enabled before the PCH transcoder, since it + * drives the transcoder clock. + */ +static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) +{ + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll; + int reg; + u32 val; + + /* PCH PLLs only available on ILK, SNB and IVB */ + BUG_ON(dev_priv->info->gen < 5); + pll = intel_crtc->pch_pll; + if (pll == NULL) + return; + + if (WARN_ON(pll->refcount == 0)) + return; + + DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? 
%d)for crtc %d\n", + pll->pll_reg, pll->active, pll->on, + intel_crtc->base.base.id); + + /* PCH refclock must be enabled first */ + assert_pch_refclk_enabled(dev_priv); + + if (pll->active++ && pll->on) { + assert_pch_pll_enabled(dev_priv, pll, NULL); + return; + } + + DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg); + + reg = pll->pll_reg; + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); + + pll->on = true; +} + +static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) +{ + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll = intel_crtc->pch_pll; + int reg; + u32 val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + if (pll == NULL) + return; + + if (WARN_ON(pll->refcount == 0)) + return; + + DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n", + pll->pll_reg, pll->active, pll->on, + intel_crtc->base.base.id); + + if (WARN_ON(pll->active == 0)) { + assert_pch_pll_disabled(dev_priv, pll, NULL); + return; + } + + if (--pll->active) { + assert_pch_pll_enabled(dev_priv, pll, NULL); + return; + } + + DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg); + + /* Make sure transcoder isn't still depending on us */ + assert_transcoder_disabled(dev_priv, intel_crtc->pipe); + + reg = pll->pll_reg; + val = I915_READ(reg); + val &= ~DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); + + pll->on = false; +} + +static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + uint32_t reg, val, pipeconf_val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + + /* Make sure PCH DPLL is enabled */ + assert_pch_pll_enabled(dev_priv, + to_intel_crtc(crtc)->pch_pll, + to_intel_crtc(crtc)); + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, pipe); + assert_fdi_rx_enabled(dev_priv, pipe); + + if (HAS_PCH_CPT(dev)) { + /* Workaround: Set the timing override bit before enabling the + * pch transcoder. */ + reg = TRANS_CHICKEN2(pipe); + val = I915_READ(reg); + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + I915_WRITE(reg, val); + } + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + pipeconf_val = I915_READ(PIPECONF(pipe)); + + if (HAS_PCH_IBX(dev_priv->dev)) { + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. + */ + val &= ~PIPECONF_BPC_MASK; + val |= pipeconf_val & PIPECONF_BPC_MASK; + } + + val &= ~TRANS_INTERLACE_MASK; + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) + if (HAS_PCH_IBX(dev_priv->dev) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) + val |= TRANS_LEGACY_INTERLACED_ILK; + else + val |= TRANS_INTERLACED; + else + val |= TRANS_PROGRESSIVE; + + I915_WRITE(reg, val | TRANS_ENABLE); + if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("failed to enable transcoder %d\n", pipe); +} + +static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 val, pipeconf_val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); + assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); + + /* Workaround: set timing override bit. 
*/ + val = I915_READ(_TRANSA_CHICKEN2); + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + I915_WRITE(_TRANSA_CHICKEN2, val); + + val = TRANS_ENABLE; + pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); + + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == + PIPECONF_INTERLACED_ILK) + val |= TRANS_INTERLACED; + else + val |= TRANS_PROGRESSIVE; + + I915_WRITE(TRANSCONF(TRANSCODER_A), val); + if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("Failed to enable PCH transcoder\n"); +} + +static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct drm_device *dev = dev_priv->dev; + uint32_t reg, val; + + /* FDI relies on the transcoder */ + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + + /* Ports must be off as well */ + assert_pch_ports_disabled(dev_priv, pipe); + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + val &= ~TRANS_ENABLE; + I915_WRITE(reg, val); + /* wait for PCH transcoder off, transcoder state */ + if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) + DRM_ERROR("failed to disable transcoder %d\n", pipe); + + if (!HAS_PCH_IBX(dev)) { + /* Workaround: Clear the timing override chicken bit again. */ + reg = TRANS_CHICKEN2(pipe); + val = I915_READ(reg); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + I915_WRITE(reg, val); + } +} + +static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(_TRANSACONF); + val &= ~TRANS_ENABLE; + I915_WRITE(_TRANSACONF, val); + /* wait for PCH transcoder off, transcoder state */ + if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) + DRM_ERROR("Failed to disable PCH transcoder\n"); + + /* Workaround: clear timing override bit. */ + val = I915_READ(_TRANSA_CHICKEN2); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + I915_WRITE(_TRANSA_CHICKEN2, val); +} + +/** + * intel_enable_pipe - enable a pipe, asserting requirements + * @dev_priv: i915 private structure + * @pipe: pipe to enable + * @pch_port: on ILK+, is this pipe driving a PCH port or not + * + * Enable @pipe, making sure that various hardware specific requirements + * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. + * + * @pipe should be %PIPE_A or %PIPE_B. + * + * Will wait until the pipe is actually running (i.e. first vblank) before + * returning. + */ +static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, + bool pch_port) +{ + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + enum pipe pch_transcoder; + int reg; + u32 val; + + if (HAS_PCH_LPT(dev_priv->dev)) + pch_transcoder = TRANSCODER_A; + else + pch_transcoder = pipe; + + /* + * A pipe without a PLL won't actually be able to drive bits from + * a plane. On ILK+ the pipe PLLs are integrated, so we don't + * need the check. 
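+	 *
+	 * For pipes driving a PCH port the FDI RX/TX PLLs must be running
+	 * instead, which is what the pch_port branch below asserts.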
+ */ + if (!HAS_PCH_SPLIT(dev_priv->dev)) + assert_pll_enabled(dev_priv, pipe); + else { + if (pch_port) { + /* if driving the PCH, we need FDI enabled */ + assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); + assert_fdi_tx_pll_enabled(dev_priv, + (enum pipe) cpu_transcoder); + } + /* FIXME: assert CPU port conditions for SNB+ */ + } + + reg = PIPECONF(cpu_transcoder); + val = I915_READ(reg); + if (val & PIPECONF_ENABLE) + return; + + I915_WRITE(reg, val | PIPECONF_ENABLE); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +/** + * intel_disable_pipe - disable a pipe, asserting requirements + * @dev_priv: i915 private structure + * @pipe: pipe to disable + * + * Disable @pipe, making sure that various hardware specific requirements + * are met, if applicable, e.g. plane disabled, panel fitter off, etc. + * + * @pipe should be %PIPE_A or %PIPE_B. + * + * Will wait until the pipe has shut down before returning. + */ +static void intel_disable_pipe(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + int reg; + u32 val; + + /* + * Make sure planes won't keep trying to pump pixels to us, + * or we might hang the display. + */ + assert_planes_disabled(dev_priv, pipe); + assert_sprites_disabled(dev_priv, pipe); + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + reg = PIPECONF(cpu_transcoder); + val = I915_READ(reg); + if ((val & PIPECONF_ENABLE) == 0) + return; + + I915_WRITE(reg, val & ~PIPECONF_ENABLE); + intel_wait_for_pipe_off(dev_priv->dev, pipe); +} + +/* + * Plane regs are double buffered, going from enabled->disabled needs a + * trigger in order to latch. The display address reg provides this. + */ +void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane) +{ + if (dev_priv->info->gen >= 4) + I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); + else + I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); +} + +/** + * intel_enable_plane - enable a display plane on a given pipe + * @dev_priv: i915 private structure + * @plane: plane to enable + * @pipe: pipe being fed + * + * Enable @plane on @pipe, making sure that @pipe is running first. + */ +static void intel_enable_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) +{ + int reg; + u32 val; + + /* If the pipe isn't enabled, we can't pump pixels and may hang */ + assert_pipe_enabled(dev_priv, pipe); + + reg = DSPCNTR(plane); + val = I915_READ(reg); + if (val & DISPLAY_PLANE_ENABLE) + return; + + I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev_priv, plane); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +/** + * intel_disable_plane - disable a display plane + * @dev_priv: i915 private structure + * @plane: plane to disable + * @pipe: pipe consuming the data + * + * Disable @plane; should be an independent operation. 
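+ *
+ * Plane registers are double buffered, so the disable is latched via
+ * intel_flush_display_plane() and we then wait a vblank for it to take
+ * effect.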
+ */ +static void intel_disable_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) +{ + int reg; + u32 val; + + reg = DSPCNTR(plane); + val = I915_READ(reg); + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + + I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev_priv, plane); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +static bool need_vtd_wa(struct drm_device *dev) +{ +#ifdef CONFIG_INTEL_IOMMU + if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) + return true; +#endif + return false; +} + +int +intel_pin_and_fence_fb_obj(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 alignment; + int ret; + + switch (obj->tiling_mode) { + case I915_TILING_NONE: + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + alignment = 128 * 1024; + else if (INTEL_INFO(dev)->gen >= 4) + alignment = 4 * 1024; + else + alignment = 64 * 1024; + break; + case I915_TILING_X: + /* pin() will align the object as required by fence */ + alignment = 0; + break; + case I915_TILING_Y: + /* Despite that we check this in framebuffer_init userspace can + * screw us over and change the tiling after the fact. Only + * pinned buffers can't change their tiling. */ + DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n"); + return -EINVAL; + default: + BUG(); + } + + /* Note that the w/a also requires 64 PTE of padding following the + * bo. We currently fill all unused PTE with the shadow page and so + * we should always have valid PTE following the scanout preventing + * the VT-d warning. + */ + if (need_vtd_wa(dev) && alignment < 256 * 1024) + alignment = 256 * 1024; + + dev_priv->mm.interruptible = false; + ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); + if (ret) + goto err_interruptible; + + /* Install a fence for tiled scan-out. Pre-i965 always needs a + * fence, whereas 965+ only requires a fence if using + * framebuffer compression. For simplicity, we always install + * a fence as the cost is not that onerous. + */ + ret = i915_gem_object_get_fence(obj); + if (ret) + goto err_unpin; + + i915_gem_object_pin_fence(obj); + + dev_priv->mm.interruptible = true; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_interruptible: + dev_priv->mm.interruptible = true; + return ret; +} + +void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_fence(obj); + i915_gem_object_unpin(obj); +} + +/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel + * is assumed to be a power-of-two. 
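+ *
+ * e.g. (illustrative numbers, X-tiled, cpp = 4): a 512-byte tile row holds
+ * 128 pixels, so x = 200, y = 20 gives tile_rows = 2, tiles = 1, i.e. a base
+ * offset of 2 * pitch * 8 + 4096 bytes, leaving x = 72, y = 4 for the tile
+ * offset register.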
*/ +unsigned long intel_gen4_compute_page_offset(int *x, int *y, + unsigned int tiling_mode, + unsigned int cpp, + unsigned int pitch) +{ + if (tiling_mode != I915_TILING_NONE) { + unsigned int tile_rows, tiles; + + tile_rows = *y / 8; + *y %= 8; + + tiles = *x / (512/cpp); + *x %= 512/cpp; + + return tile_rows * pitch * 8 + tiles * 4096; + } else { + unsigned int offset; + + offset = *y * pitch + *x * cpp; + *y = 0; + *x = (offset & 4095) / cpp; + return offset & -4096; + } +} + +static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int plane = intel_crtc->plane; + unsigned long linear_offset; + u32 dspcntr; + u32 reg; + + switch (plane) { + case 0: + case 1: + break; + default: + DRM_ERROR("Can't update plane %d in SAREA\n", plane); + return -EINVAL; + } + + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + reg = DSPCNTR(plane); + dspcntr = I915_READ(reg); + /* Mask out pixel format bits in case we change it */ + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; + switch (fb->pixel_format) { + case DRM_FORMAT_C8: + dspcntr |= DISPPLANE_8BPP; + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + dspcntr |= DISPPLANE_BGRX555; + break; + case DRM_FORMAT_RGB565: + dspcntr |= DISPPLANE_BGRX565; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + dspcntr |= DISPPLANE_BGRX888; + break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + dspcntr |= DISPPLANE_RGBX888; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + dspcntr |= DISPPLANE_BGRX101010; + break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + dspcntr |= DISPPLANE_RGBX101010; + break; + default: + BUG(); + } + + if (INTEL_INFO(dev)->gen >= 4) { + if (obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + + I915_WRITE(reg, dspcntr); + + linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + + if (INTEL_INFO(dev)->gen >= 4) { + intel_crtc->dspaddr_offset = + intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, + fb->bits_per_pixel / 8, + fb->pitches[0]); + linear_offset -= intel_crtc->dspaddr_offset; + } else { + intel_crtc->dspaddr_offset = linear_offset; + } + + DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", + obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); + if (INTEL_INFO(dev)->gen >= 4) { + I915_MODIFY_DISPBASE(DSPSURF(plane), + obj->gtt_offset + intel_crtc->dspaddr_offset); + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPLINOFF(plane), linear_offset); + } else + I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); + POSTING_READ(reg); + + return 0; +} + +static int ironlake_update_plane(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int plane = intel_crtc->plane; + unsigned long linear_offset; + u32 dspcntr; + u32 reg; + + switch (plane) { + case 0: + case 1: + case 2: + break; + default: + DRM_ERROR("Can't update plane %d in SAREA\n", plane); + return -EINVAL; + } + + intel_fb = 
to_intel_framebuffer(fb); + obj = intel_fb->obj; + + reg = DSPCNTR(plane); + dspcntr = I915_READ(reg); + /* Mask out pixel format bits in case we change it */ + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; + switch (fb->pixel_format) { + case DRM_FORMAT_C8: + dspcntr |= DISPPLANE_8BPP; + break; + case DRM_FORMAT_RGB565: + dspcntr |= DISPPLANE_BGRX565; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + dspcntr |= DISPPLANE_BGRX888; + break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + dspcntr |= DISPPLANE_RGBX888; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + dspcntr |= DISPPLANE_BGRX101010; + break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + dspcntr |= DISPPLANE_RGBX101010; + break; + default: + BUG(); + } + + if (obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + + /* must disable */ + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + + I915_WRITE(reg, dspcntr); + + linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + intel_crtc->dspaddr_offset = + intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, + fb->bits_per_pixel / 8, + fb->pitches[0]); + linear_offset -= intel_crtc->dspaddr_offset; + + DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", + obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); + I915_MODIFY_DISPBASE(DSPSURF(plane), + obj->gtt_offset + intel_crtc->dspaddr_offset); + if (IS_HASWELL(dev)) { + I915_WRITE(DSPOFFSET(plane), (y << 16) | x); + } else { + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPLINOFF(plane), linear_offset); + } + POSTING_READ(reg); + + return 0; +} + +/* Assume fb object is pinned & idle & fenced and just update base pointers */ +static int +intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + intel_increase_pllclock(crtc); + + return dev_priv->display.update_plane(crtc, fb, x, y); +} + +void intel_display_handle_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + + /* + * Flips in the rings have been nuked by the reset, + * so complete all pending flips so that user space + * will get its events and not get stuck. + * + * Also update the base address of all primary + * planes to the the last fb to make sure we're + * showing the correct fb after a reset. + * + * Need to make two loops over the crtcs so that we + * don't try to grab a crtc mutex before the + * pending_flip_queue really got woken up. 
+ */ + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum plane plane = intel_crtc->plane; + + intel_prepare_page_flip(dev, plane); + intel_finish_page_flip_plane(dev, plane); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + mutex_lock(&crtc->mutex); + if (intel_crtc->active) + dev_priv->display.update_plane(crtc, crtc->fb, + crtc->x, crtc->y); + mutex_unlock(&crtc->mutex); + } +} + +static int +intel_finish_fb(struct drm_framebuffer *old_fb) +{ + struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + bool was_interruptible = dev_priv->mm.interruptible; + int ret; + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. + */ + dev_priv->mm.interruptible = false; + ret = i915_gem_object_finish_gpu(obj); + dev_priv->mm.interruptible = was_interruptible; + + return ret; +} + +static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_master_private *master_priv; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + if (!dev->primary->master) + return; + + master_priv = dev->primary->master->driver_priv; + if (!master_priv->sarea_priv) + return; + + switch (intel_crtc->pipe) { + case 0: + master_priv->sarea_priv->pipeA_x = x; + master_priv->sarea_priv->pipeA_y = y; + break; + case 1: + master_priv->sarea_priv->pipeB_x = x; + master_priv->sarea_priv->pipeB_y = y; + break; + default: + break; + } +} + +static int +intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_framebuffer *old_fb; + int ret; + + /* no fb bound */ + if (!fb) { + DRM_ERROR("No FB bound\n"); + return 0; + } + + if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) { + DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", + intel_crtc->plane, + INTEL_INFO(dev)->num_pipes); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + ret = intel_pin_and_fence_fb_obj(dev, + to_intel_framebuffer(fb)->obj, + NULL); + if (ret != 0) { + mutex_unlock(&dev->struct_mutex); + DRM_ERROR("pin & fence failed\n"); + return ret; + } + + ret = dev_priv->display.update_plane(crtc, fb, x, y); + if (ret) { + intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); + mutex_unlock(&dev->struct_mutex); + DRM_ERROR("failed to update base address\n"); + return ret; + } + + old_fb = crtc->fb; + crtc->fb = fb; + crtc->x = x; + crtc->y = y; + + if (old_fb) { + intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + } + + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); + + intel_crtc_update_sarea_pos(crtc, x, y); + + return 0; +} + +static void intel_fdi_normal_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* enable normal train */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + if 
(IS_IVYBRIDGE(dev)) { + temp &= ~FDI_LINK_TRAIN_NONE_IVB; + temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + } + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + + /* wait one idle pattern time */ + POSTING_READ(reg); + udelay(1000); + + /* IVB wants error correction enabled */ + if (IS_IVYBRIDGE(dev)) + I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | + FDI_FE_ERRC_ENABLE); +} + +static void ivb_modeset_global_resources(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *pipe_B_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); + struct intel_crtc *pipe_C_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); + uint32_t temp; + + /* When everything is off disable fdi C so that we could enable fdi B + * with all lanes. XXX: This misses the case where a pipe is not using + * any pch resources and so doesn't need any fdi lanes. */ + if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); + + temp = I915_READ(SOUTH_CHICKEN1); + temp &= ~FDI_BC_BIFURCATION_SELECT; + DRM_DEBUG_KMS("disabling fdi C rx\n"); + I915_WRITE(SOUTH_CHICKEN1, temp); + } +} + +/* The FDI link training functions for ILK/Ibexpeak. */ +static void ironlake_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 reg, temp, tries; + + /* FDI needs bits from pipe & plane first */ + assert_pipe_enabled(dev_priv, pipe); + assert_plane_enabled(dev_priv, plane); + + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(reg, temp); + I915_READ(reg); + udelay(150); + + /* enable CPU FDI TX and PCH FDI RX */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp | FDI_TX_ENABLE); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(150); + + /* Ironlake workaround, enable clock pointer after FDI enable*/ + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | + FDI_RX_PHASE_SYNC_POINTER_EN); + + reg = FDI_RX_IIR(pipe); + for (tries = 0; tries < 5; tries++) { + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if ((temp & FDI_RX_BIT_LOCK)) { + DRM_DEBUG_KMS("FDI train 1 done.\n"); + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + break; + } + } + if (tries == 5) + DRM_ERROR("FDI train 1 fail!\n"); + + /* Train 2 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= 
FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(150); + + reg = FDI_RX_IIR(pipe); + for (tries = 0; tries < 5; tries++) { + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + } + if (tries == 5) + DRM_ERROR("FDI train 2 fail!\n"); + + DRM_DEBUG_KMS("FDI train done\n"); + +} + +static const int snb_b_fdi_train_param[] = { + FDI_LINK_TRAIN_400MV_0DB_SNB_B, + FDI_LINK_TRAIN_400MV_6DB_SNB_B, + FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, + FDI_LINK_TRAIN_800MV_0DB_SNB_B, +}; + +/* The FDI link training functions for SNB/Cougarpoint. */ +static void gen6_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp, i, retry; + + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(150); + + /* enable CPU FDI TX and PCH FDI RX */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + /* SNB-B */ + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + I915_WRITE(reg, temp | FDI_TX_ENABLE); + + I915_WRITE(FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(150); + + for (i = 0; i < 4; i++) { + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(500); + + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_BIT_LOCK) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done.\n"); + break; + } + udelay(50); + } + if (retry < 5) + break; + } + if (i == 4) + DRM_ERROR("FDI train 1 fail!\n"); + + /* Train 2 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + if (IS_GEN6(dev)) { + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + /* SNB-B */ + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + } + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + } + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(150); + + for (i = 0; i < 4; i++) { + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + 
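+		/* wait before sampling FDI_RX_IIR for symbol lock at this
+		 * vswing/emphasis level */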
udelay(500); + + for (retry = 0; retry < 5; retry++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done.\n"); + break; + } + udelay(50); + } + if (retry < 5) + break; + } + if (i == 4) + DRM_ERROR("FDI train 2 fail!\n"); + + DRM_DEBUG_KMS("FDI train done.\n"); +} + +/* Manual link training for Ivy Bridge A0 parts */ +static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp, i; + + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(150); + + DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", + I915_READ(FDI_RX_IIR(pipe))); + + /* enable CPU FDI TX and PCH FDI RX */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(7 << 19); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); + temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | FDI_TX_ENABLE); + + I915_WRITE(FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_AUTO; + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(150); + + for (i = 0; i < 4; i++) { + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(500); + + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_BIT_LOCK || + (I915_READ(reg) & FDI_RX_BIT_LOCK)) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); + break; + } + } + if (i == 4) + DRM_ERROR("FDI train 1 fail!\n"); + + /* Train 2 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE_IVB; + temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(150); + + for (i = 0; i < 4; i++) { + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; + temp |= snb_b_fdi_train_param[i]; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(500); + + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + + if (temp & FDI_RX_SYMBOL_LOCK) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); + break; + } + } + if (i == 4) + DRM_ERROR("FDI train 2 fail!\n"); + + DRM_DEBUG_KMS("FDI train done.\n"); +} + +static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = 
intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = intel_crtc->pipe; + u32 reg, temp; + + + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~((0x7 << 19) | (0x7 << 16)); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(200); + + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_PCDCLK); + + POSTING_READ(reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for Ironlake */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + } +} + +static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* Switch from PCDclk to Rawclk */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_PCDCLK); + + /* Disable CPU FDI TX PLL */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); + + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(100); +} + +static void ironlake_fdi_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* disable CPU FDI tx and PCH FDI rx */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_ENABLE); + POSTING_READ(reg); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(0x7 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(100); + + /* Ironlake workaround, disable clock pointer after downing FDI */ + if (HAS_PCH_IBX(dev)) { + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + } + + /* still set train pattern 1 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + /* BPC in FDI rx is consistent with that in PIPECONF */ + temp &= ~(0x07 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(100); +} + +static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long flags; + bool pending; + + if (i915_reset_in_progress(&dev_priv->gpu_error) || + intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + return false; + + spin_lock_irqsave(&dev->event_lock, flags); + pending = to_intel_crtc(crtc)->unpin_work != NULL; + 
spin_unlock_irqrestore(&dev->event_lock, flags); + + return pending; +} + +static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (crtc->fb == NULL) + return; + + WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); + + wait_event(dev_priv->pending_flip_queue, + !intel_crtc_has_pending_flip(crtc)); + + mutex_lock(&dev->struct_mutex); + intel_finish_fb(crtc->fb); + mutex_unlock(&dev->struct_mutex); +} + +/* Program iCLKIP clock to the desired frequency */ +static void lpt_program_iclkip(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 divsel, phaseinc, auxdiv, phasedir = 0; + u32 temp; + + mutex_lock(&dev_priv->dpio_lock); + + /* It is necessary to ungate the pixclk gate prior to programming + * the divisors, and gate it back when it is done. + */ + I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); + + /* Disable SSCCTL */ + intel_sbi_write(dev_priv, SBI_SSCCTL6, + intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | + SBI_SSCCTL_DISABLE, + SBI_ICLK); + + /* 20MHz is a corner case which is out of range for the 7-bit divisor */ + if (crtc->mode.clock == 20000) { + auxdiv = 1; + divsel = 0x41; + phaseinc = 0x20; + } else { + /* The iCLK virtual clock root frequency is in MHz, + * but the crtc->mode.clock in in KHz. To get the divisors, + * it is necessary to divide one by another, so we + * convert the virtual clock precision to KHz here for higher + * precision. + */ + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor, msb_divisor_value, pi_value; + + desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); + msb_divisor_value = desired_divisor / iclk_pi_range; + pi_value = desired_divisor % iclk_pi_range; + + auxdiv = 0; + divsel = msb_divisor_value - 2; + phaseinc = pi_value; + } + + /* This should not happen with any sane values */ + WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); + WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK); + + DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + crtc->mode.clock, + auxdiv, + divsel, + phasedir, + phaseinc); + + /* Program SSCDIVINTPHASE6 */ + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; + temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); + temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; + temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); + temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); + temp |= SBI_SSCDIVINTPHASE_PROPAGATE; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); + + /* Program SSCAUXDIV */ + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); + temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); + intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); + + /* Enable modulator and associated divider */ + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp &= ~SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + /* Wait for initialization time */ + udelay(24); + + I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); + + mutex_unlock(&dev_priv->dpio_lock); +} + +/* + * Enable PCH resources required for PCH ports: + * - PCH PLLs + * - FDI training & RX/TX + * - update transcoder timings + * - DP transcoding bits + * - transcoder + */ +static void 
ironlake_pch_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + assert_transcoder_disabled(dev_priv, pipe); + + /* Write the TU size bits before fdi link training, so that error + * detection works. */ + I915_WRITE(FDI_RX_TUSIZE1(pipe), + I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + + /* For PCH output, training FDI link */ + dev_priv->display.fdi_link_train(crtc); + + /* XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoder that already use the clock when we share it. + * + * Note that enable_pch_pll tries to do the right thing, but get_pch_pll + * unconditionally resets the pll - we need that to have the right LVDS + * enable sequence. */ + ironlake_enable_pch_pll(intel_crtc); + + if (HAS_PCH_CPT(dev)) { + u32 sel; + + temp = I915_READ(PCH_DPLL_SEL); + switch (pipe) { + default: + case 0: + temp |= TRANSA_DPLL_ENABLE; + sel = TRANSA_DPLLB_SEL; + break; + case 1: + temp |= TRANSB_DPLL_ENABLE; + sel = TRANSB_DPLLB_SEL; + break; + case 2: + temp |= TRANSC_DPLL_ENABLE; + sel = TRANSC_DPLLB_SEL; + break; + } + if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B) + temp |= sel; + else + temp &= ~sel; + I915_WRITE(PCH_DPLL_SEL, temp); + } + + /* set transcoder timing, panel must allow it */ + assert_panel_unlocked(dev_priv, pipe); + I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); + I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); + I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); + + I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); + I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); + I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); + I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); + + intel_fdi_normal_train(crtc); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev) && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); + temp |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); + temp |= bpc << 9; /* same format but at 11:9 */ + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + switch (intel_trans_dp_port_sel(crtc)) { + case PCH_DP_B: + temp |= TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + temp |= TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + temp |= TRANS_DP_PORT_SEL_D; + break; + default: + BUG(); + } + + I915_WRITE(reg, temp); + } + + ironlake_enable_pch_transcoder(dev_priv, pipe); +} + +static void lpt_pch_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + + assert_transcoder_disabled(dev_priv, TRANSCODER_A); + + lpt_program_iclkip(crtc); + + /* Set transcoder timing. 
*/ + I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); + I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); + I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); + + I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); + I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); + I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); + I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); + + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); +} + +static void intel_put_pch_pll(struct intel_crtc *intel_crtc) +{ + struct intel_pch_pll *pll = intel_crtc->pch_pll; + + if (pll == NULL) + return; + + if (pll->refcount == 0) { + WARN(1, "bad PCH PLL refcount\n"); + return; + } + + --pll->refcount; + intel_crtc->pch_pll = NULL; +} + +static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) +{ + struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; + struct intel_pch_pll *pll; + int i; + + pll = intel_crtc->pch_pll; + if (pll) { + DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + goto prepare; + } + + if (HAS_PCH_IBX(dev_priv->dev)) { + /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ + i = intel_crtc->pipe; + pll = &dev_priv->pch_plls[i]; + + DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + + goto found; + } + + for (i = 0; i < dev_priv->num_pch_pll; i++) { + pll = &dev_priv->pch_plls[i]; + + /* Only want to check enabled timings first */ + if (pll->refcount == 0) + continue; + + if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && + fp == I915_READ(pll->fp0_reg)) { + DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", + intel_crtc->base.base.id, + pll->pll_reg, pll->refcount, pll->active); + + goto found; + } + } + + /* Ok no matching timings, maybe there's a free one? */ + for (i = 0; i < dev_priv->num_pch_pll; i++) { + pll = &dev_priv->pch_plls[i]; + if (pll->refcount == 0) { + DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", + intel_crtc->base.base.id, pll->pll_reg); + goto found; + } + } + + return NULL; + +found: + intel_crtc->pch_pll = pll; + pll->refcount++; + DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); +prepare: /* separate function? 
*/ + DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); + + /* Wait for the clocks to stabilize before rewriting the regs */ + I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(pll->pll_reg); + udelay(150); + + I915_WRITE(pll->fp0_reg, fp); + I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); + pll->on = false; + return pll; +} + +void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int dslreg = PIPEDSL(pipe); + u32 temp; + + temp = I915_READ(dslreg); + udelay(500); + if (wait_for(I915_READ(dslreg) != temp, 5)) { + if (wait_for(I915_READ(dslreg) != temp, 5)) + DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); + } +} + +static void ironlake_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 temp; + + WARN_ON(!crtc->enabled); + + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + if ((temp & LVDS_PORT_EN) == 0) + I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); + } + + + if (intel_crtc->config.has_pch_encoder) { + /* Note: FDI PLL enabling _must_ be done before we enable the + * cpu pipes, hence this is separate from all the other fdi/pch + * enabling. */ + ironlake_fdi_pll_enable(intel_crtc); + } else { + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + } + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_enable) + encoder->pre_enable(encoder); + + /* Enable panel fitting for LVDS */ + if (dev_priv->pch_pf_size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + if (IS_IVYBRIDGE(dev)) + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | + PF_PIPE_SEL_IVB(pipe)); + else + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); + I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); + } + + /* + * On ILK+ LUT must be loaded before the pipe is running but with + * clocks enabled + */ + intel_crtc_load_lut(crtc); + + intel_enable_pipe(dev_priv, pipe, + intel_crtc->config.has_pch_encoder); + intel_enable_plane(dev_priv, plane, pipe); + + if (intel_crtc->config.has_pch_encoder) + ironlake_pch_enable(crtc); + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); + + intel_crtc_update_cursor(crtc, true); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); + + if (HAS_PCH_CPT(dev)) + intel_cpt_verify_modeset(dev, intel_crtc->pipe); + + /* + * There seems to be a race in PCH platform hw (at least on some + * outputs) where an enabled pipe still completes any pageflip right + * away (as if the pipe is off) instead of waiting for vblank. As soon + * as the first vblank happend, everything works as expected. Hence just + * wait for one vblank before returning to avoid strange things + * happening. 
+ */ + intel_wait_for_vblank(dev, intel_crtc->pipe); +} + +static void haswell_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + + WARN_ON(!crtc->enabled); + + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + if (intel_crtc->config.has_pch_encoder) + dev_priv->display.fdi_link_train(crtc); + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_enable) + encoder->pre_enable(encoder); + + intel_ddi_enable_pipe_clock(intel_crtc); + + /* Enable panel fitting for eDP */ + if (dev_priv->pch_pf_size && + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | + PF_PIPE_SEL_IVB(pipe)); + I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); + I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); + } + + /* + * On ILK+ LUT must be loaded before the pipe is running but with + * clocks enabled + */ + intel_crtc_load_lut(crtc); + + intel_ddi_set_pipe_settings(crtc); + intel_ddi_enable_transcoder_func(crtc); + + intel_enable_pipe(dev_priv, pipe, + intel_crtc->config.has_pch_encoder); + intel_enable_plane(dev_priv, plane, pipe); + + if (intel_crtc->config.has_pch_encoder) + lpt_pch_enable(crtc); + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); + + intel_crtc_update_cursor(crtc, true); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); + + /* + * There seems to be a race in PCH platform hw (at least on some + * outputs) where an enabled pipe still completes any pageflip right + * away (as if the pipe is off) instead of waiting for vblank. As soon + * as the first vblank happend, everything works as expected. Hence just + * wait for one vblank before returning to avoid strange things + * happening. 
+ */ + intel_wait_for_vblank(dev, intel_crtc->pipe); +} + +static void ironlake_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 reg, temp; + + + if (!intel_crtc->active) + return; + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); + intel_crtc_update_cursor(crtc, false); + + intel_disable_plane(dev_priv, plane, pipe); + + if (dev_priv->cfb_plane == plane) + intel_disable_fbc(dev); + + intel_disable_pipe(dev_priv, pipe); + + /* Disable PF */ + I915_WRITE(PF_CTL(pipe), 0); + I915_WRITE(PF_WIN_SZ(pipe), 0); + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->post_disable) + encoder->post_disable(encoder); + + ironlake_fdi_disable(crtc); + + ironlake_disable_pch_transcoder(dev_priv, pipe); + + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; + I915_WRITE(reg, temp); + + /* disable DPLL_SEL */ + temp = I915_READ(PCH_DPLL_SEL); + switch (pipe) { + case 0: + temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); + break; + case 1: + temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + break; + case 2: + /* C shares PLL A or B */ + temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); + break; + default: + BUG(); /* wtf */ + } + I915_WRITE(PCH_DPLL_SEL, temp); + } + + /* disable PCH DPLL */ + intel_disable_pch_pll(intel_crtc); + + ironlake_fdi_pll_disable(intel_crtc); + + intel_crtc->active = false; + intel_update_watermarks(dev); + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); +} + +static void haswell_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + + if (!intel_crtc->active) + return; + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); + intel_crtc_update_cursor(crtc, false); + + intel_disable_plane(dev_priv, plane, pipe); + + if (dev_priv->cfb_plane == plane) + intel_disable_fbc(dev); + + intel_disable_pipe(dev_priv, pipe); + + intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); + + /* XXX: Once we have proper panel fitter state tracking implemented with + * hardware state read/check support we should switch to only disable + * the panel fitter when we know it's used. 
*/ + if (intel_using_power_well(dev)) { + I915_WRITE(PF_CTL(pipe), 0); + I915_WRITE(PF_WIN_SZ(pipe), 0); + } + + intel_ddi_disable_pipe_clock(intel_crtc); + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->post_disable) + encoder->post_disable(encoder); + + if (intel_crtc->config.has_pch_encoder) { + lpt_disable_pch_transcoder(dev_priv); + intel_ddi_fdi_disable(crtc); + } + + intel_crtc->active = false; + intel_update_watermarks(dev); + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); +} + +static void ironlake_crtc_off(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + intel_put_pch_pll(intel_crtc); +} + +static void haswell_crtc_off(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* Stop saying we're using TRANSCODER_EDP because some other CRTC might + * start using it. */ + intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; + + intel_ddi_put_crtc_pll(crtc); +} + +static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) +{ + if (!enable && intel_crtc->overlay) { + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev->struct_mutex); + dev_priv->mm.interruptible = false; + (void) intel_overlay_switch_off(intel_crtc->overlay); + dev_priv->mm.interruptible = true; + mutex_unlock(&dev->struct_mutex); + } + + /* Let userspace switch the overlay on again. In most cases userspace + * has to recompute where to put it anyway. + */ +} + +/** + * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware + * cursor plane briefly if not already running after enabling the display + * plane. + * This workaround avoids occasional blank screens when self refresh is + * enabled. 
+ */ +static void +g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + u32 cntl = I915_READ(CURCNTR(pipe)); + + if ((cntl & CURSOR_MODE) == 0) { + u32 fw_bcl_self = I915_READ(FW_BLC_SELF); + + I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN); + I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX); + intel_wait_for_vblank(dev_priv->dev, pipe); + I915_WRITE(CURCNTR(pipe), cntl); + I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); + I915_WRITE(FW_BLC_SELF, fw_bcl_self); + } +} + +static void i9xx_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + + WARN_ON(!crtc->enabled); + + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + intel_enable_pll(dev_priv, pipe); + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_enable) + encoder->pre_enable(encoder); + + intel_enable_pipe(dev_priv, pipe, false); + intel_enable_plane(dev_priv, plane, pipe); + if (IS_G4X(dev)) + g4x_fixup_plane(dev_priv, pipe); + + intel_crtc_load_lut(crtc); + intel_update_fbc(dev); + + /* Give the overlay scaler a chance to enable if it's on this pipe */ + intel_crtc_dpms_overlay(intel_crtc, true); + intel_crtc_update_cursor(crtc, true); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); +} + +static void i9xx_pfit_disable(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + uint32_t pctl = I915_READ(PFIT_CONTROL); + + assert_pipe_disabled(dev_priv, crtc->pipe); + + if (INTEL_INFO(dev)->gen >= 4) + pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT; + else + pipe = PIPE_B; + + if (pipe == crtc->pipe) { + DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl); + I915_WRITE(PFIT_CONTROL, 0); + } +} + +static void i9xx_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + + if (!intel_crtc->active) + return; + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + + /* Give the overlay scaler a chance to disable if it's on this pipe */ + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); + intel_crtc_dpms_overlay(intel_crtc, false); + intel_crtc_update_cursor(crtc, false); + + if (dev_priv->cfb_plane == plane) + intel_disable_fbc(dev); + + intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); + + i9xx_pfit_disable(intel_crtc); + + intel_disable_pll(dev_priv, pipe); + + intel_crtc->active = false; + intel_update_fbc(dev); + intel_update_watermarks(dev); +} + +static void i9xx_crtc_off(struct drm_crtc *crtc) +{ +} + +static void intel_crtc_update_sarea(struct drm_crtc *crtc, + bool enabled) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_master_private *master_priv; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + + if (!dev->primary->master) + return; + + master_priv = dev->primary->master->driver_priv; + if (!master_priv->sarea_priv) + return; + + switch (pipe) { + case 0: + master_priv->sarea_priv->pipeA_w = enabled ? 
crtc->mode.hdisplay : 0; + master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; + break; + case 1: + master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; + master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; + break; + default: + DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); + break; + } +} + +/** + * Sets the power management mode of the pipe and plane. + */ +void intel_crtc_update_dpms(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + bool enable = false; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) + enable |= intel_encoder->connectors_active; + + if (enable) + dev_priv->display.crtc_enable(crtc); + else + dev_priv->display.crtc_disable(crtc); + + intel_crtc_update_sarea(crtc, enable); +} + +static void intel_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* crtc should still be enabled when we disable it. */ + WARN_ON(!crtc->enabled); + + intel_crtc->eld_vld = false; + dev_priv->display.crtc_disable(crtc); + intel_crtc_update_sarea(crtc, false); + dev_priv->display.off(crtc); + + assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); + assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); + + if (crtc->fb) { + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + crtc->fb = NULL; + } + + /* Update computed state. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->encoder || !connector->encoder->crtc) + continue; + + if (connector->encoder->crtc != crtc) + continue; + + connector->dpms = DRM_MODE_DPMS_OFF; + to_intel_encoder(connector->encoder)->connectors_active = false; + } +} + +void intel_modeset_disable(struct drm_device *dev) +{ + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->enabled) + intel_crtc_disable(crtc); + } +} + +void intel_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_encoder); +} + +/* Simple dpms helper for encodres with just one connector, no cloning and only + * one kind of off state. It clamps all !ON modes to fully OFF and changes the + * state of the entire output pipe. */ +void intel_encoder_dpms(struct intel_encoder *encoder, int mode) +{ + if (mode == DRM_MODE_DPMS_ON) { + encoder->connectors_active = true; + + intel_crtc_update_dpms(encoder->base.crtc); + } else { + encoder->connectors_active = false; + + intel_crtc_update_dpms(encoder->base.crtc); + } +} + +/* Cross check the actual hw state with our own modeset state tracking (and it's + * internal consistency). 
*/ +static void intel_connector_check_state(struct intel_connector *connector) +{ + if (connector->get_hw_state(connector)) { + struct intel_encoder *encoder = connector->encoder; + struct drm_crtc *crtc; + bool encoder_enabled; + enum pipe pipe; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.base.id, + drm_get_connector_name(&connector->base)); + + WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, + "wrong connector dpms state\n"); + WARN(connector->base.encoder != &encoder->base, + "active connector not linked to encoder\n"); + WARN(!encoder->connectors_active, + "encoder->connectors_active not set\n"); + + encoder_enabled = encoder->get_hw_state(encoder, &pipe); + WARN(!encoder_enabled, "encoder not enabled\n"); + if (WARN_ON(!encoder->base.crtc)) + return; + + crtc = encoder->base.crtc; + + WARN(!crtc->enabled, "crtc not enabled\n"); + WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); + WARN(pipe != to_intel_crtc(crtc)->pipe, + "encoder active on the wrong pipe\n"); + } +} + +/* Even simpler default implementation, if there's really no special case to + * consider. */ +void intel_connector_dpms(struct drm_connector *connector, int mode) +{ + /* All the simple cases only support two dpms states. */ + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + if (connector->encoder) + intel_encoder_dpms(to_intel_encoder(connector->encoder), mode); + + intel_modeset_check_state(connector->dev); +} + +/* Simple connector->get_hw_state implementation for encoders that support only + * one connector and no cloning and hence the encoder state determines the state + * of the connector. */ +bool intel_connector_get_hw_state(struct intel_connector *connector) +{ + enum pipe pipe = 0; + struct intel_encoder *encoder = connector->encoder; + + return encoder->get_hw_state(encoder, &pipe); +} + +static bool intel_crtc_compute_config(struct drm_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->dev; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + + if (HAS_PCH_SPLIT(dev)) { + /* FDI link clock is fixed at 2.7G */ + if (pipe_config->requested_mode.clock * 3 + > IRONLAKE_FDI_FREQ * 4) + return false; + } + + /* All interlaced capable intel hw wants timings in frames. Note though + * that intel_lvds_mode_fixup does some funny tricks with the crtc + * timings, so we need to be careful not to clobber these.*/ + if (!pipe_config->timings_set) + drm_mode_set_crtcinfo(adjusted_mode, 0); + + /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes + * with a hsync front porch of 0. + */ + if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && + adjusted_mode->hsync_start == adjusted_mode->hdisplay) + return false; + + if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) { + pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */ + } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) { + /* only a 8bpc pipe, with 6bpc dither through the panel fitter + * for lvds. 
*/ + pipe_config->pipe_bpp = 8*3; + } + + return true; +} + +static int valleyview_get_display_clock_speed(struct drm_device *dev) +{ + return 400000; /* FIXME */ +} + +static int i945_get_display_clock_speed(struct drm_device *dev) +{ + return 400000; +} + +static int i915_get_display_clock_speed(struct drm_device *dev) +{ + return 333000; +} + +static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) +{ + return 200000; +} + +static int i915gm_get_display_clock_speed(struct drm_device *dev) +{ + u16 gcfgc = 0; + + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + + if (gcfgc & GC_LOW_FREQUENCY_ENABLE) + return 133000; + else { + switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { + case GC_DISPLAY_CLOCK_333_MHZ: + return 333000; + default: + case GC_DISPLAY_CLOCK_190_200_MHZ: + return 190000; + } + } +} + +static int i865_get_display_clock_speed(struct drm_device *dev) +{ + return 266000; +} + +static int i855_get_display_clock_speed(struct drm_device *dev) +{ + u16 hpllcc = 0; + /* Assume that the hardware is in the high speed state. This + * should be the default. + */ + switch (hpllcc & GC_CLOCK_CONTROL_MASK) { + case GC_CLOCK_133_200: + case GC_CLOCK_100_200: + return 200000; + case GC_CLOCK_166_250: + return 250000; + case GC_CLOCK_100_133: + return 133000; + } + + /* Shouldn't happen */ + return 0; +} + +static int i830_get_display_clock_speed(struct drm_device *dev) +{ + return 133000; +} + +static void +intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) +{ + while (*num > DATA_LINK_M_N_MASK || + *den > DATA_LINK_M_N_MASK) { + *num >>= 1; + *den >>= 1; + } +} + +static void compute_m_n(unsigned int m, unsigned int n, + uint32_t *ret_m, uint32_t *ret_n) +{ + *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); + *ret_m = div_u64((uint64_t) m * *ret_n, n); + intel_reduce_m_n_ratio(ret_m, ret_n); +} + +void +intel_link_compute_m_n(int bits_per_pixel, int nlanes, + int pixel_clock, int link_clock, + struct intel_link_m_n *m_n) +{ + m_n->tu = 64; + + compute_m_n(bits_per_pixel * pixel_clock, + link_clock * nlanes * 8, + &m_n->gmch_m, &m_n->gmch_n); + + compute_m_n(pixel_clock, link_clock, + &m_n->link_m, &m_n->link_n); +} + +static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) +{ + if (i915_panel_use_ssc >= 0) + return i915_panel_use_ssc != 0; + return dev_priv->lvds_use_ssc + && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); +} + +static int vlv_get_refclk(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int refclk = 27000; /* for DP & HDMI */ + + return 100000; /* only one validated so far */ + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { + refclk = 96000; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) + refclk = 100000; + else + refclk = 96000; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { + refclk = 100000; + } + + return refclk; +} + +static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int refclk; + + if (IS_VALLEYVIEW(dev)) { + refclk = vlv_get_refclk(crtc); + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) { + refclk = dev_priv->lvds_ssc_freq * 1000; + DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", + refclk / 1000); + } else if (!IS_GEN2(dev)) { + refclk = 96000; + } else { + refclk = 48000; + } + + return 
refclk; +} + +static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc) +{ + unsigned dotclock = crtc->config.adjusted_mode.clock; + struct dpll *clock = &crtc->config.dpll; + + /* SDVO TV has fixed PLL values depend on its clock range, + this mirrors vbios setting. */ + if (dotclock >= 100000 && dotclock < 140500) { + clock->p1 = 2; + clock->p2 = 10; + clock->n = 3; + clock->m1 = 16; + clock->m2 = 8; + } else if (dotclock >= 140500 && dotclock <= 200000) { + clock->p1 = 1; + clock->p2 = 10; + clock->n = 6; + clock->m1 = 12; + clock->m2 = 8; + } + + crtc->config.clock_set = true; +} + +static void i9xx_update_pll_dividers(struct intel_crtc *crtc, + intel_clock_t *reduced_clock) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + u32 fp, fp2 = 0; + struct dpll *clock = &crtc->config.dpll; + + if (IS_PINEVIEW(dev)) { + fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; + if (reduced_clock) + fp2 = (1 << reduced_clock->n) << 16 | + reduced_clock->m1 << 8 | reduced_clock->m2; + } else { + fp = clock->n << 16 | clock->m1 << 8 | clock->m2; + if (reduced_clock) + fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | + reduced_clock->m2; + } + + I915_WRITE(FP0(pipe), fp); + + crtc->lowfreq_avail = false; + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + reduced_clock && i915_powersave) { + I915_WRITE(FP1(pipe), fp2); + crtc->lowfreq_avail = true; + } else { + I915_WRITE(FP1(pipe), fp); + } +} + +static void intel_dp_set_m_n(struct intel_crtc *crtc) +{ + if (crtc->config.has_pch_encoder) + intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); + else + intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); +} + +static void vlv_update_pll(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + u32 dpll, mdiv, pdiv; + u32 bestn, bestm1, bestm2, bestp1, bestp2; + bool is_sdvo; + u32 temp; + + mutex_lock(&dev_priv->dpio_lock); + + is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || + intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); + + dpll = DPLL_VGA_MODE_DIS; + dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; + dpll |= DPLL_REFA_CLK_ENABLE_VLV; + dpll |= DPLL_INTEGRATED_CLOCK_VLV; + + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + + bestn = crtc->config.dpll.n; + bestm1 = crtc->config.dpll.m1; + bestm2 = crtc->config.dpll.m2; + bestp1 = crtc->config.dpll.p1; + bestp2 = crtc->config.dpll.p2; + + /* + * In Valleyview PLL and program lane counter registers are exposed + * through DPIO interface + */ + mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); + mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); + mdiv |= ((bestn << DPIO_N_SHIFT)); + mdiv |= (1 << DPIO_POST_DIV_SHIFT); + mdiv |= (1 << DPIO_K_SHIFT); + mdiv |= DPIO_ENABLE_CALIBRATION; + intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); + + intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); + + pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | + (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | + (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | + (5 << DPIO_CLK_BIAS_CTL_SHIFT); + intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); + + intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == 
DPLL_LOCK_VLV), 1)) + DRM_ERROR("DPLL %d failed to lock\n", pipe); + + intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); + + if (crtc->config.has_dp_encoder) + intel_dp_set_m_n(crtc); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + + temp = 0; + if (is_sdvo) { + temp = 0; + if (crtc->config.pixel_multiplier > 1) { + temp = (crtc->config.pixel_multiplier - 1) + << DPLL_MD_UDI_MULTIPLIER_SHIFT; + } + } + I915_WRITE(DPLL_MD(pipe), temp); + POSTING_READ(DPLL_MD(pipe)); + + /* Now program lane control registers */ + if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) + || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { + temp = 0x1000C4; + if(pipe == 1) + temp |= (1 << 21); + intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); + } + + if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) { + temp = 0x1000C4; + if(pipe == 1) + temp |= (1 << 21); + intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); + } + + mutex_unlock(&dev_priv->dpio_lock); +} + +static void i9xx_update_pll(struct intel_crtc *crtc, + intel_clock_t *reduced_clock, + int num_connectors, + bool needs_tv_clock) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder; + int pipe = crtc->pipe; + u32 dpll; + bool is_sdvo; + struct dpll *clock = &crtc->config.dpll; + + i9xx_update_pll_dividers(crtc, reduced_clock); + + is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || + intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) + dpll |= DPLLB_MODE_LVDS; + else + dpll |= DPLLB_MODE_DAC_SERIAL; + + if (is_sdvo) { + if ((crtc->config.pixel_multiplier > 1) && + (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) { + dpll |= (crtc->config.pixel_multiplier - 1) + << SDVO_MULTIPLIER_SHIFT_HIRES; + } + dpll |= DPLL_DVO_HIGH_SPEED; + } + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) + dpll |= DPLL_DVO_HIGH_SPEED; + + /* compute bitmask from p1 value */ + if (IS_PINEVIEW(dev)) + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; + else { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (IS_G4X(dev) && reduced_clock) + dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + } + switch (clock->p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } + if (INTEL_INFO(dev)->gen >= 4) + dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); + + if (is_sdvo && needs_tv_clock) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT)) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + udelay(150); + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + + if (crtc->config.has_dp_encoder) + intel_dp_set_m_n(crtc); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the 
clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + + if (INTEL_INFO(dev)->gen >= 4) { + u32 temp = 0; + if (is_sdvo) { + temp = 0; + if (crtc->config.pixel_multiplier > 1) { + temp = (crtc->config.pixel_multiplier - 1) + << DPLL_MD_UDI_MULTIPLIER_SHIFT; + } + } + I915_WRITE(DPLL_MD(pipe), temp); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(DPLL(pipe), dpll); + } +} + +static void i8xx_update_pll(struct intel_crtc *crtc, + struct drm_display_mode *adjusted_mode, + intel_clock_t *reduced_clock, + int num_connectors) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder; + int pipe = crtc->pipe; + u32 dpll; + struct dpll *clock = &crtc->config.dpll; + + i9xx_update_pll_dividers(crtc, reduced_clock); + + dpll = DPLL_VGA_MODE_DIS; + + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + } else { + if (clock->p1 == 2) + dpll |= PLL_P1_DIVIDE_BY_TWO; + else + dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; + if (clock->p2 == 4) + dpll |= PLL_P2_DIVIDE_BY_4; + } + + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(DPLL(pipe)); + udelay(150); + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. 
+ */ + I915_WRITE(DPLL(pipe), dpll); +} + +static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = intel_crtc->pipe; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + uint32_t vsyncshift; + + if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_end -= 1; + vsyncshift = adjusted_mode->crtc_hsync_start + - adjusted_mode->crtc_htotal / 2; + } else { + vsyncshift = 0; + } + + if (INTEL_INFO(dev)->gen > 3) + I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); + + I915_WRITE(HTOTAL(cpu_transcoder), + (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(HBLANK(cpu_transcoder), + (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + I915_WRITE(HSYNC(cpu_transcoder), + (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); + + I915_WRITE(VTOTAL(cpu_transcoder), + (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + I915_WRITE(VBLANK(cpu_transcoder), + (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + I915_WRITE(VSYNC(cpu_transcoder), + (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); + + /* Workaround: when the EDP input selection is B, the VTOTAL_B must be + * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is + * documented on the DDI_FUNC_CTL register description, EDP Input Select + * bits. */ + if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && + (pipe == PIPE_B || pipe == PIPE_C)) + I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); + + /* pipesrc controls the size that is scaled from, which should + * always be the user's requested size. + */ + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); +} + +static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t pipeconf; + + pipeconf = I915_READ(PIPECONF(intel_crtc->pipe)); + + if (dev_priv->quirks & QUIRK_PIPEA_FORCE && + I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) + pipeconf |= PIPECONF_ENABLE; + + if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { + /* Enable pixel doubling when the dot clock is > 90% of the (display) + * core speed. + * + * XXX: No double-wide on 915GM pipe B. Is that the only reason for the + * pipe == 0 check? 
+ */ + if (intel_crtc->config.requested_mode.clock > + dev_priv->display.get_display_clock_speed(dev) * 9 / 10) + pipeconf |= PIPECONF_DOUBLE_WIDE; + else + pipeconf &= ~PIPECONF_DOUBLE_WIDE; + } + + /* default to 8bpc */ + pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN); + if (intel_crtc->config.has_dp_encoder) { + if (intel_crtc->config.dither) { + pipeconf |= PIPECONF_6BPC | + PIPECONF_DITHER_EN | + PIPECONF_DITHER_TYPE_SP; + } + } + + if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base, + INTEL_OUTPUT_EDP)) { + if (intel_crtc->config.dither) { + pipeconf |= PIPECONF_6BPC | + PIPECONF_ENABLE | + I965_PIPECONF_ACTIVE; + } + } + + if (HAS_PIPE_CXSR(dev)) { + if (intel_crtc->lowfreq_avail) { + DRM_DEBUG_KMS("enabling CxSR downclocking\n"); + pipeconf |= PIPECONF_CXSR_DOWNCLOCK; + } else { + DRM_DEBUG_KMS("disabling CxSR downclocking\n"); + pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; + } + } + + pipeconf &= ~PIPECONF_INTERLACE_MASK; + if (!IS_GEN2(dev) && + intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; + else + pipeconf |= PIPECONF_PROGRESSIVE; + + if (IS_VALLEYVIEW(dev)) { + if (intel_crtc->config.limited_color_range) + pipeconf |= PIPECONF_COLOR_RANGE_SELECT; + else + pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT; + } + + I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); + POSTING_READ(PIPECONF(intel_crtc->pipe)); +} + +static int i9xx_crtc_mode_set(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int refclk, num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dspcntr; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_lvds = false, is_tv = false; + struct intel_encoder *encoder; + const intel_limit_t *limit; + int ret; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + } + + num_connectors++; + } + + refclk = i9xx_get_refclk(crtc, num_connectors); + + /* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + */ + limit = intel_limit(crtc, refclk); + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, + &clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + /* Ensure that the cursor is valid for the new mode before changing... */ + intel_crtc_update_cursor(crtc, true); + + if (is_lvds && dev_priv->lvds_downclock_avail) { + /* + * Ensure we match the reduced clock's P to the target clock. + * If the clocks don't match, we can't switch the display clock + * by using the FP0/FP1. In such case we will disable the LVDS + * downclock feature. + */ + has_reduced_clock = limit->find_pll(limit, crtc, + dev_priv->lvds_downclock, + refclk, + &clock, + &reduced_clock); + } + /* Compat-code for transition, will disappear. 
*/ + if (!intel_crtc->config.clock_set) { + intel_crtc->config.dpll.n = clock.n; + intel_crtc->config.dpll.m1 = clock.m1; + intel_crtc->config.dpll.m2 = clock.m2; + intel_crtc->config.dpll.p1 = clock.p1; + intel_crtc->config.dpll.p2 = clock.p2; + } + + if (is_sdvo && is_tv) + i9xx_adjust_sdvo_tv_clock(intel_crtc); + + if (IS_GEN2(dev)) + i8xx_update_pll(intel_crtc, adjusted_mode, + has_reduced_clock ? &reduced_clock : NULL, + num_connectors); + else if (IS_VALLEYVIEW(dev)) + vlv_update_pll(intel_crtc); + else + i9xx_update_pll(intel_crtc, + has_reduced_clock ? &reduced_clock : NULL, + num_connectors, + is_sdvo && is_tv); + + /* Set up the display plane register */ + dspcntr = DISPPLANE_GAMMA_ENABLE; + + if (!IS_VALLEYVIEW(dev)) { + if (pipe == 0) + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; + else + dspcntr |= DISPPLANE_SEL_PIPE_B; + } + + DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); + drm_mode_debug_printmodeline(mode); + + intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + + /* pipesrc and dspsize control the size that is scaled from, + * which should always be the user's requested size. + */ + I915_WRITE(DSPSIZE(plane), + ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(DSPPOS(plane), 0); + + i9xx_set_pipeconf(intel_crtc); + + intel_enable_pipe(dev_priv, pipe, false); + + intel_wait_for_vblank(dev, pipe); + + I915_WRITE(DSPCNTR(plane), dspcntr); + POSTING_READ(DSPCNTR(plane)); + + ret = intel_pipe_set_base(crtc, x, y, fb); + + intel_update_watermarks(dev); + + return ret; +} + +static bool i9xx_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(PIPECONF(crtc->pipe)); + if (!(tmp & PIPECONF_ENABLE)) + return false; + + return true; +} + +static void ironlake_init_pch_refclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + u32 val, final; + bool has_lvds = false; + bool has_cpu_edp = false; + bool has_pch_edp = false; + bool has_panel = false; + bool has_ck505 = false; + bool can_ssc = false; + + /* We need to take the global config into account */ + list_for_each_entry(encoder, &mode_config->encoder_list, + base.head) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + has_panel = true; + has_lvds = true; + break; + case INTEL_OUTPUT_EDP: + has_panel = true; + if (intel_encoder_is_pch_edp(&encoder->base)) + has_pch_edp = true; + else + has_cpu_edp = true; + break; + } + } + + if (HAS_PCH_IBX(dev)) { + has_ck505 = dev_priv->display_clock_mode; + can_ssc = has_ck505; + } else { + has_ck505 = false; + can_ssc = true; + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", + has_panel, has_lvds, has_pch_edp, has_cpu_edp, + has_ck505); + + /* Ironlake: try to setup display ref clock before DPLL + * enabling. This is only under driver's control after + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. + */ + val = I915_READ(PCH_DREF_CONTROL); + + /* As we must carefully and slowly disable/enable each source in turn, + * compute the final state we want first and check if we need to + * make any changes at all. 
+ */ + final = val; + final &= ~DREF_NONSPREAD_SOURCE_MASK; + if (has_ck505) + final |= DREF_NONSPREAD_CK505_ENABLE; + else + final |= DREF_NONSPREAD_SOURCE_ENABLE; + + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + final &= ~DREF_SSC1_ENABLE; + + if (has_panel) { + final |= DREF_SSC_SOURCE_ENABLE; + + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_SSC1_ENABLE; + + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } else { + final |= DREF_SSC_SOURCE_DISABLE; + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + + if (final == val) + return; + + /* Always enable nonspread source */ + val &= ~DREF_NONSPREAD_SOURCE_MASK; + + if (has_ck505) + val |= DREF_NONSPREAD_CK505_ENABLE; + else + val |= DREF_NONSPREAD_SOURCE_ENABLE; + + if (has_panel) { + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_ENABLE; + + /* SSC must be turned on before enabling the CPU output */ + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + DRM_DEBUG_KMS("Using SSC on panel\n"); + val |= DREF_SSC1_ENABLE; + } else + val &= ~DREF_SSC1_ENABLE; + + /* Get SSC going before enabling the outputs */ + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + DRM_DEBUG_KMS("Using SSC on eDP\n"); + val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + } + else + val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } else { + DRM_DEBUG_KMS("Disabling SSC entirely\n"); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Turn off CPU output */ + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; + + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } + + BUG_ON(val != final); +} + +/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ +static void lpt_init_pch_refclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + bool has_vga = false; + bool is_sdv = false; + u32 tmp; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_vga = true; + break; + } + } + + if (!has_vga) + return; + + mutex_lock(&dev_priv->dpio_lock); + + /* XXX: Rip out SDV support once Haswell ships for real. 
*/ + if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) + is_sdv = true; + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + udelay(24); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + if (!is_sdv) { + tmp = I915_READ(SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); + + if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + DRM_ERROR("FDI mPHY reset assert timeout\n"); + + tmp = I915_READ(SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); + + if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, + 100)) + DRM_ERROR("FDI mPHY reset de-assert timeout\n"); + } + + tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); + tmp &= ~(0xFF << 24); + tmp |= (0x12 << 24); + intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); + + if (is_sdv) { + tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); + tmp |= 0x7FFF; + intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY); + } + + tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); + + if (is_sdv) { + tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY); + tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); + intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY); + tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); + intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY); + tmp |= (0x3F << 8); + intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY); + tmp |= (0x3F << 8); + intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY); + } + + tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); + + if (!is_sdv) { + tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); + } + + tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); + + if (!is_sdv) { + tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + + 
tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); + } + + /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ + tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); + tmp |= SBI_DBUFF0_ENABLE; + intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->dpio_lock); +} + +/* + * Initialize reference clocks when the driver loads + */ +void intel_init_pch_refclk(struct drm_device *dev) +{ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + ironlake_init_pch_refclk(dev); + else if (HAS_PCH_LPT(dev)) + lpt_init_pch_refclk(dev); +} + +static int ironlake_get_refclk(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder; + struct intel_encoder *edp_encoder = NULL; + int num_connectors = 0; + bool is_lvds = false; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_EDP: + edp_encoder = encoder; + break; + } + num_connectors++; + } + + if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { + DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", + dev_priv->lvds_ssc_freq); + return dev_priv->lvds_ssc_freq * 1000; + } + + return 120000; +} + +static void ironlake_set_pipeconf(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + bool dither) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + uint32_t val; + + val = I915_READ(PIPECONF(pipe)); + + val &= ~PIPECONF_BPC_MASK; + switch (intel_crtc->config.pipe_bpp) { + case 18: + val |= PIPECONF_6BPC; + break; + case 24: + val |= PIPECONF_8BPC; + break; + case 30: + val |= PIPECONF_10BPC; + break; + case 36: + val |= PIPECONF_12BPC; + break; + default: + /* Case prevented by intel_choose_pipe_bpp_dither. */ + BUG(); + } + + val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); + if (dither) + val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + + val &= ~PIPECONF_INTERLACE_MASK; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + val |= PIPECONF_INTERLACED_ILK; + else + val |= PIPECONF_PROGRESSIVE; + + if (intel_crtc->config.limited_color_range) + val |= PIPECONF_COLOR_RANGE_SELECT; + else + val &= ~PIPECONF_COLOR_RANGE_SELECT; + + I915_WRITE(PIPECONF(pipe), val); + POSTING_READ(PIPECONF(pipe)); +} + +/* + * Set up the pipe CSC unit. + * + * Currently only full range RGB to limited range RGB conversion + * is supported, but eventually this should handle various + * RGB<->YCbCr scenarios as well. + */ +static void intel_set_pipe_csc(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + uint16_t coeff = 0x7800; /* 1.0 */ + + /* + * TODO: Check what kind of values actually come out of the pipe + * with these coeff/postoff values and adjust to get the best + * accuracy. Perhaps we even need to take the bpc value into + * consideration. + */ + + if (intel_crtc->config.limited_color_range) + coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... 
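+ * (that is, 219/255 ~ 0.86: the gain that
+ * compresses full-range RGB into the 16..235
+ * limited range; the matching +16 black-level
+ * lift is applied via the post-offset registers,
+ * or the black screen offset bit, further down.)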
*/ + + /* + * GY/GU and RY/RU should be the other way around according + * to BSpec, but reality doesn't agree. Just set them up in + * a way that results in the correct picture. + */ + I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); + I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); + + I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); + I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); + + I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); + I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); + + I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); + + if (INTEL_INFO(dev)->gen > 6) { + uint16_t postoff = 0; + + if (intel_crtc->config.limited_color_range) + postoff = (16 * (1 << 13) / 255) & 0x1fff; + + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); + + I915_WRITE(PIPE_CSC_MODE(pipe), 0); + } else { + uint32_t mode = CSC_MODE_YUV_TO_RGB; + + if (intel_crtc->config.limited_color_range) + mode |= CSC_BLACK_SCREEN_OFFSET; + + I915_WRITE(PIPE_CSC_MODE(pipe), mode); + } +} + +static void haswell_set_pipeconf(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + bool dither) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + uint32_t val; + + val = I915_READ(PIPECONF(cpu_transcoder)); + + val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); + if (dither) + val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + + val &= ~PIPECONF_INTERLACE_MASK_HSW; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + val |= PIPECONF_INTERLACED_ILK; + else + val |= PIPECONF_PROGRESSIVE; + + I915_WRITE(PIPECONF(cpu_transcoder), val); + POSTING_READ(PIPECONF(cpu_transcoder)); +} + +static bool ironlake_compute_clocks(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, + bool *has_reduced_clock, + intel_clock_t *reduced_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + int refclk; + const intel_limit_t *limit; + bool ret, is_sdvo = false, is_tv = false, is_lvds = false; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + switch (intel_encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (intel_encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + } + } + + refclk = ironlake_get_refclk(crtc); + + /* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + */ + limit = intel_limit(crtc, refclk); + ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, + clock); + if (!ret) + return false; + + if (is_lvds && dev_priv->lvds_downclock_avail) { + /* + * Ensure we match the reduced clock's P to the target clock. + * If the clocks don't match, we can't switch the display clock + * by using the FP0/FP1. In such case we will disable the LVDS + * downclock feature. 
+ */ + *has_reduced_clock = limit->find_pll(limit, crtc, + dev_priv->lvds_downclock, + refclk, + clock, + reduced_clock); + } + + if (is_sdvo && is_tv) + i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc)); + + return true; +} + +static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t temp; + + temp = I915_READ(SOUTH_CHICKEN1); + if (temp & FDI_BC_BIFURCATION_SELECT) + return; + + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); + + temp |= FDI_BC_BIFURCATION_SELECT; + DRM_DEBUG_KMS("enabling fdi C rx\n"); + I915_WRITE(SOUTH_CHICKEN1, temp); + POSTING_READ(SOUTH_CHICKEN1); +} + +static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *pipe_B_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); + + DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n", + intel_crtc->pipe, intel_crtc->fdi_lanes); + if (intel_crtc->fdi_lanes > 4) { + DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n", + intel_crtc->pipe, intel_crtc->fdi_lanes); + /* Clamp lanes to avoid programming the hw with bogus values. */ + intel_crtc->fdi_lanes = 4; + + return false; + } + + if (INTEL_INFO(dev)->num_pipes == 2) + return true; + + switch (intel_crtc->pipe) { + case PIPE_A: + return true; + case PIPE_B: + if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && + intel_crtc->fdi_lanes > 2) { + DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", + intel_crtc->pipe, intel_crtc->fdi_lanes); + /* Clamp lanes to avoid programming the hw with bogus values. */ + intel_crtc->fdi_lanes = 2; + + return false; + } + + if (intel_crtc->fdi_lanes > 2) + WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); + else + cpt_enable_fdi_bc_bifurcation(dev); + + return true; + case PIPE_C: + if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) { + if (intel_crtc->fdi_lanes > 2) { + DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", + intel_crtc->pipe, intel_crtc->fdi_lanes); + /* Clamp lanes to avoid programming the hw with bogus values. */ + intel_crtc->fdi_lanes = 2; + + return false; + } + } else { + DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); + return false; + } + + cpt_enable_fdi_bc_bifurcation(dev); + + return true; + default: + BUG(); + } +} + +int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) +{ + /* + * Account for spread spectrum to avoid + * oversubscribing the link. Max center spread + * is 2.5%; use 5% for safety's sake. 
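+ * E.g. a 148500 kHz (1080p) mode at 24 bpp on the usual 2.7 GHz FDI
+ * link (link_bw = 270000 once the 8b/10b encoding is factored out):
+ * bps = 148500 * 24 * 21 / 20 = 3742200, and
+ * 3742200 / (270000 * 8) + 1 = 2 lanes.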
+ */ + u32 bps = target_clock * bpp * 21 / 20; + return bps / (link_bw * 8) + 1; +} + +void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + + I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); + I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n); + I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m); + I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n); +} + +void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = crtc->pipe; + enum transcoder transcoder = crtc->config.cpu_transcoder; + + if (INTEL_INFO(dev)->gen >= 5) { + I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); + I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); + I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); + I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); + } else { + I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); + I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n); + I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m); + I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n); + } +} + +static void ironlake_fdi_set_m_n(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct intel_link_m_n m_n = {0}; + int target_clock, lane, link_bw; + + /* FDI is a binary signal running at ~2.7GHz, encoding + * each output octet as 10 bits. The actual frequency + * is stored as a divider into a 100MHz clock, and the + * mode pixel clock is stored in units of 1KHz. 
+ * Hence the bw of each lane in terms of the mode signal + * is: + */ + link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; + + if (intel_crtc->config.pixel_target_clock) + target_clock = intel_crtc->config.pixel_target_clock; + else + target_clock = adjusted_mode->clock; + + lane = ironlake_get_lanes_required(target_clock, link_bw, + intel_crtc->config.pipe_bpp); + + intel_crtc->fdi_lanes = lane; + + if (intel_crtc->config.pixel_multiplier > 1) + link_bw *= intel_crtc->config.pixel_multiplier; + intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock, + link_bw, &m_n); + + intel_cpu_transcoder_set_m_n(intel_crtc, &m_n); +} + +static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, + intel_clock_t *clock, u32 *fp, + intel_clock_t *reduced_clock, u32 *fp2) +{ + struct drm_crtc *crtc = &intel_crtc->base; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + uint32_t dpll; + int factor, num_connectors = 0; + bool is_lvds = false, is_sdvo = false, is_tv = false; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + switch (intel_encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (intel_encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + } + + num_connectors++; + } + + /* Enable autotuning of the PLL clock (if permissible) */ + factor = 21; + if (is_lvds) { + if ((intel_panel_use_ssc(dev_priv) && + dev_priv->lvds_ssc_freq == 100) || + (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) + factor = 25; + } else if (is_sdvo && is_tv) + factor = 20; + + if (clock->m < factor * clock->n) + *fp |= FP_CB_TUNE; + + if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) + *fp2 |= FP_CB_TUNE; + + dpll = 0; + + if (is_lvds) + dpll |= DPLLB_MODE_LVDS; + else + dpll |= DPLLB_MODE_DAC_SERIAL; + if (is_sdvo) { + if (intel_crtc->config.pixel_multiplier > 1) { + dpll |= (intel_crtc->config.pixel_multiplier - 1) + << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; + } + dpll |= DPLL_DVO_HIGH_SPEED; + } + if (intel_crtc->config.has_dp_encoder && + intel_crtc->config.has_pch_encoder) + dpll |= DPLL_DVO_HIGH_SPEED; + + /* compute bitmask from p1 value */ + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + + switch (clock->p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; + } + + if (is_sdvo && is_tv) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (is_tv) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; + + return dpll; +} + +static int ironlake_crtc_mode_set(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + int pipe = intel_crtc->pipe; + int plane = 
intel_crtc->plane; + int num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll, fp = 0, fp2 = 0; + bool ok, has_reduced_clock = false; + bool is_lvds = false; + struct intel_encoder *encoder; + int ret; + bool dither, fdi_config_ok; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + } + + num_connectors++; + } + + WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), + "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); + + intel_crtc->config.cpu_transcoder = pipe; + + ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, + &has_reduced_clock, &reduced_clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + /* Compat-code for transition, will disappear. */ + if (!intel_crtc->config.clock_set) { + intel_crtc->config.dpll.n = clock.n; + intel_crtc->config.dpll.m1 = clock.m1; + intel_crtc->config.dpll.m2 = clock.m2; + intel_crtc->config.dpll.p1 = clock.p1; + intel_crtc->config.dpll.p2 = clock.p2; + } + + /* Ensure that the cursor is valid for the new mode before changing... */ + intel_crtc_update_cursor(crtc, true); + + /* determine panel color depth */ + dither = intel_crtc->config.dither; + if (is_lvds && dev_priv->lvds_dither) + dither = true; + + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + if (has_reduced_clock) + fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | + reduced_clock.m2; + + dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock, + has_reduced_clock ? &fp2 : NULL); + + DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); + drm_mode_debug_printmodeline(mode); + + /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ + if (intel_crtc->config.has_pch_encoder) { + struct intel_pch_pll *pll; + + pll = intel_get_pch_pll(intel_crtc, dpll, fp); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", + pipe); + return -EINVAL; + } + } else + intel_put_pch_pll(intel_crtc); + + if (intel_crtc->config.has_dp_encoder) + intel_dp_set_m_n(intel_crtc); + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + + if (intel_crtc->pch_pll) { + I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(intel_crtc->pch_pll->pll_reg); + udelay(150); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); + } + + intel_crtc->lowfreq_avail = false; + if (intel_crtc->pch_pll) { + if (is_lvds && has_reduced_clock && i915_powersave) { + I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); + intel_crtc->lowfreq_avail = true; + } else { + I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); + } + } + + intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + + /* Note, this also computes intel_crtc->fdi_lanes which is used below in + * ironlake_check_fdi_lanes. 
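+ * If that lane count violates the FDI B/C sharing rules,
+ * ironlake_check_fdi_lanes() clamps it and the mode set as a whole
+ * fails with -EINVAL via fdi_config_ok.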
*/ + intel_crtc->fdi_lanes = 0; + if (intel_crtc->config.has_pch_encoder) + ironlake_fdi_set_m_n(crtc); + + fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); + + ironlake_set_pipeconf(crtc, adjusted_mode, dither); + + intel_wait_for_vblank(dev, pipe); + + /* Set up the display plane register */ + I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); + POSTING_READ(DSPCNTR(plane)); + + ret = intel_pipe_set_base(crtc, x, y, fb); + + intel_update_watermarks(dev); + + intel_update_linetime_watermarks(dev, pipe, adjusted_mode); + + return fdi_config_ok ? ret : -EINVAL; +} + +static bool ironlake_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(PIPECONF(crtc->pipe)); + if (!(tmp & PIPECONF_ENABLE)) + return false; + + if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE) + pipe_config->has_pch_encoder = true; + + return true; +} + +static void haswell_modeset_global_resources(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool enable = false; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + if (crtc->pipe != PIPE_A && crtc->base.enabled) + enable = true; + /* XXX: Should check for edp transcoder here, but thanks to init + * sequence that's not yet available. Just in case desktop eDP + * on PORT D is possible on haswell, too. */ + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->type != INTEL_OUTPUT_EDP && + encoder->connectors_active) + enable = true; + } + + /* Even the eDP panel fitter is outside the always-on well. */ + if (dev_priv->pch_pf_size) + enable = true; + + intel_set_power_well(dev, enable); +} + +static int haswell_crtc_mode_set(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int num_connectors = 0; + bool is_cpu_edp = false; + struct intel_encoder *encoder; + int ret; + bool dither; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_EDP: + if (!intel_encoder_is_pch_edp(&encoder->base)) + is_cpu_edp = true; + break; + } + + num_connectors++; + } + + if (is_cpu_edp) + intel_crtc->config.cpu_transcoder = TRANSCODER_EDP; + else + intel_crtc->config.cpu_transcoder = pipe; + + /* We are not sure yet this won't happen. */ + WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", + INTEL_PCH_TYPE(dev)); + + WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", + num_connectors, pipe_name(pipe)); + + WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) & + (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); + + WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); + + if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) + return -EINVAL; + + /* Ensure that the cursor is valid for the new mode before changing... 
*/ + intel_crtc_update_cursor(crtc, true); + + /* determine panel color depth */ + dither = intel_crtc->config.dither; + + DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); + drm_mode_debug_printmodeline(mode); + + if (intel_crtc->config.has_dp_encoder) + intel_dp_set_m_n(intel_crtc); + + intel_crtc->lowfreq_avail = false; + + intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + + if (intel_crtc->config.has_pch_encoder) + ironlake_fdi_set_m_n(crtc); + + haswell_set_pipeconf(crtc, adjusted_mode, dither); + + intel_set_pipe_csc(crtc); + + /* Set up the display plane register */ + I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); + POSTING_READ(DSPCNTR(plane)); + + ret = intel_pipe_set_base(crtc, x, y, fb); + + intel_update_watermarks(dev); + + intel_update_linetime_watermarks(dev, pipe, adjusted_mode); + + return ret; +} + +static bool haswell_get_pipe_config(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder)); + if (!(tmp & PIPECONF_ENABLE)) + return false; + + /* + * aswell has only FDI/PCH transcoder A. It is which is connected to + * DDI E. So just check whether this pipe is wired to DDI E and whether + * the PCH transcoder is on. + */ + tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe)); + if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) && + I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE) + pipe_config->has_pch_encoder = true; + + + return true; +} + +static int intel_crtc_mode_set(struct drm_crtc *crtc, + int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_encoder_helper_funcs *encoder_funcs; + struct intel_encoder *encoder; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + int pipe = intel_crtc->pipe; + int ret; + + drm_vblank_pre_modeset(dev, pipe); + + ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb); + + drm_vblank_post_modeset(dev, pipe); + + if (ret != 0) + return ret; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base), + mode->base.id, mode->name); + if (encoder->mode_set) { + encoder->mode_set(encoder); + } else { + encoder_funcs = encoder->base.helper_private; + encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); + } + } + + return 0; +} + +static bool intel_eld_uptodate(struct drm_connector *connector, + int reg_eldv, uint32_t bits_eldv, + int reg_elda, uint32_t bits_elda, + int reg_edid) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint8_t *eld = connector->eld; + uint32_t i; + + i = I915_READ(reg_eldv); + i &= bits_eldv; + + if (!eld[0]) + return !i; + + if (!i) + return false; + + i = I915_READ(reg_elda); + i &= ~bits_elda; + I915_WRITE(reg_elda, i); + + for (i = 0; i < eld[2]; i++) + if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) + return false; + + return true; +} + +static void g4x_write_eld(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint8_t *eld = connector->eld; + uint32_t eldv; + uint32_t len; + uint32_t i; + + i = 
I915_READ(G4X_AUD_VID_DID); + + if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) + eldv = G4X_ELDV_DEVCL_DEVBLC; + else + eldv = G4X_ELDV_DEVCTG; + + if (intel_eld_uptodate(connector, + G4X_AUD_CNTL_ST, eldv, + G4X_AUD_CNTL_ST, G4X_ELD_ADDR, + G4X_HDMIW_HDMIEDID)) + return; + + i = I915_READ(G4X_AUD_CNTL_ST); + i &= ~(eldv | G4X_ELD_ADDR); + len = (i >> 9) & 0x1f; /* ELD buffer size */ + I915_WRITE(G4X_AUD_CNTL_ST, i); + + if (!eld[0]) + return; + + len = min_t(uint8_t, eld[2], len); + DRM_DEBUG_DRIVER("ELD size %d\n", len); + for (i = 0; i < len; i++) + I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); + + i = I915_READ(G4X_AUD_CNTL_ST); + i |= eldv; + I915_WRITE(G4X_AUD_CNTL_ST, i); +} + +static void haswell_write_eld(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint8_t *eld = connector->eld; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t eldv; + uint32_t i; + int len; + int pipe = to_intel_crtc(crtc)->pipe; + int tmp; + + int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); + int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); + int aud_config = HSW_AUD_CFG(pipe); + int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; + + + DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); + + /* Audio output enable */ + DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); + tmp = I915_READ(aud_cntrl_st2); + tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); + I915_WRITE(aud_cntrl_st2, tmp); + + /* Wait for 1 vertical blank */ + intel_wait_for_vblank(dev, pipe); + + /* Set ELD valid state */ + tmp = I915_READ(aud_cntrl_st2); + DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); + tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); + I915_WRITE(aud_cntrl_st2, tmp); + tmp = I915_READ(aud_cntrl_st2); + DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); + + /* Enable HDMI mode */ + tmp = I915_READ(aud_config); + DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); + /* clear N_programing_enable and N_value_index */ + tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); + I915_WRITE(aud_config, tmp); + + DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); + + eldv = AUDIO_ELD_VALID_A << (pipe * 4); + intel_crtc->eld_vld = true; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); + eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ + I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ + } else + I915_WRITE(aud_config, 0); + + if (intel_eld_uptodate(connector, + aud_cntrl_st2, eldv, + aud_cntl_st, IBX_ELD_ADDRESS, + hdmiw_hdmiedid)) + return; + + i = I915_READ(aud_cntrl_st2); + i &= ~eldv; + I915_WRITE(aud_cntrl_st2, i); + + if (!eld[0]) + return; + + i = I915_READ(aud_cntl_st); + i &= ~IBX_ELD_ADDRESS; + I915_WRITE(aud_cntl_st, i); + i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ + DRM_DEBUG_DRIVER("port num:%d\n", i); + + len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ + DRM_DEBUG_DRIVER("ELD size %d\n", len); + for (i = 0; i < len; i++) + I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); + + i = I915_READ(aud_cntrl_st2); + i |= eldv; + I915_WRITE(aud_cntrl_st2, i); + +} + +static void ironlake_write_eld(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint8_t *eld = connector->eld; + uint32_t eldv; + uint32_t i; + int len; + int hdmiw_hdmiedid; + int 
aud_config; + int aud_cntl_st; + int aud_cntrl_st2; + int pipe = to_intel_crtc(crtc)->pipe; + + if (HAS_PCH_IBX(connector->dev)) { + hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); + aud_config = IBX_AUD_CFG(pipe); + aud_cntl_st = IBX_AUD_CNTL_ST(pipe); + aud_cntrl_st2 = IBX_AUD_CNTL_ST2; + } else { + hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); + aud_config = CPT_AUD_CFG(pipe); + aud_cntl_st = CPT_AUD_CNTL_ST(pipe); + aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; + } + + DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); + + i = I915_READ(aud_cntl_st); + i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ + if (!i) { + DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); + /* operate blindly on all ports */ + eldv = IBX_ELD_VALIDB; + eldv |= IBX_ELD_VALIDB << 4; + eldv |= IBX_ELD_VALIDB << 8; + } else { + DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); + eldv = IBX_ELD_VALIDB << ((i - 1) * 4); + } + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); + eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ + I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ + } else + I915_WRITE(aud_config, 0); + + if (intel_eld_uptodate(connector, + aud_cntrl_st2, eldv, + aud_cntl_st, IBX_ELD_ADDRESS, + hdmiw_hdmiedid)) + return; + + i = I915_READ(aud_cntrl_st2); + i &= ~eldv; + I915_WRITE(aud_cntrl_st2, i); + + if (!eld[0]) + return; + + i = I915_READ(aud_cntl_st); + i &= ~IBX_ELD_ADDRESS; + I915_WRITE(aud_cntl_st, i); + + len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ + DRM_DEBUG_DRIVER("ELD size %d\n", len); + for (i = 0; i < len; i++) + I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); + + i = I915_READ(aud_cntrl_st2); + i |= eldv; + I915_WRITE(aud_cntrl_st2, i); +} + +void intel_write_eld(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct drm_crtc *crtc = encoder->crtc; + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + connector = drm_select_eld(encoder, mode); + if (!connector) + return; + + DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, + drm_get_connector_name(connector), + connector->encoder->base.id, + drm_get_encoder_name(connector->encoder)); + + connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; + + if (dev_priv->display.write_eld) + dev_priv->display.write_eld(connector, crtc); +} + +/** Loads the palette/gamma unit for the CRTC with the prepared values */ +void intel_crtc_load_lut(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int palreg = PALETTE(intel_crtc->pipe); + int i; + + /* The clocks have to be on to load the palette. 
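+ * Each of the 256 entries packs the 8 bit R, G and B values as
+ * (r << 16) | (g << 8) | b.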
*/ + if (!crtc->enabled || !intel_crtc->active) + return; + + /* use legacy palette for Ironlake */ + if (HAS_PCH_SPLIT(dev)) + palreg = LGC_PALETTE(intel_crtc->pipe); + + for (i = 0; i < 256; i++) { + I915_WRITE(palreg + 4 * i, + (intel_crtc->lut_r[i] << 16) | + (intel_crtc->lut_g[i] << 8) | + intel_crtc->lut_b[i]); + } +} + +static void i845_update_cursor(struct drm_crtc *crtc, u32 base) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + bool visible = base != 0; + u32 cntl; + + if (intel_crtc->cursor_visible == visible) + return; + + cntl = I915_READ(_CURACNTR); + if (visible) { + /* On these chipsets we can only modify the base whilst + * the cursor is disabled. + */ + I915_WRITE(_CURABASE, base); + + cntl &= ~(CURSOR_FORMAT_MASK); + /* XXX width must be 64, stride 256 => 0x00 << 28 */ + cntl |= CURSOR_ENABLE | + CURSOR_GAMMA_ENABLE | + CURSOR_FORMAT_ARGB; + } else + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + I915_WRITE(_CURACNTR, cntl); + + intel_crtc->cursor_visible = visible; +} + +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + bool visible = base != 0; + + if (intel_crtc->cursor_visible != visible) { + uint32_t cntl = I915_READ(CURCNTR(pipe)); + if (base) { + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= pipe << 28; /* Connect to correct pipe */ + } else { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } + I915_WRITE(CURCNTR(pipe), cntl); + + intel_crtc->cursor_visible = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(CURBASE(pipe), base); +} + +static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + bool visible = base != 0; + + if (intel_crtc->cursor_visible != visible) { + uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); + if (base) { + cntl &= ~CURSOR_MODE; + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + } else { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } + if (IS_HASWELL(dev)) + cntl |= CURSOR_PIPE_CSC_ENABLE; + I915_WRITE(CURCNTR_IVB(pipe), cntl); + + intel_crtc->cursor_visible = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(CURBASE_IVB(pipe), base); +} + +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... 
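+ * The CURPOS registers use sign-magnitude coordinates, so a negative
+ * x or y sets the corresponding sign bit and stores the absolute
+ * value; the base address is cleared (hiding the cursor) as soon as
+ * no part of it can overlap the framebuffer.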
*/ +static void intel_crtc_update_cursor(struct drm_crtc *crtc, + bool on) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int x = intel_crtc->cursor_x; + int y = intel_crtc->cursor_y; + u32 base, pos; + bool visible; + + pos = 0; + + if (on && crtc->enabled && crtc->fb) { + base = intel_crtc->cursor_addr; + if (x > (int) crtc->fb->width) + base = 0; + + if (y > (int) crtc->fb->height) + base = 0; + } else + base = 0; + + if (x < 0) { + if (x + intel_crtc->cursor_width < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; + + if (y < 0) { + if (y + intel_crtc->cursor_height < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; + + visible = base != 0; + if (!visible && !intel_crtc->cursor_visible) + return; + + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + I915_WRITE(CURPOS_IVB(pipe), pos); + ivb_update_cursor(crtc, base); + } else { + I915_WRITE(CURPOS(pipe), pos); + if (IS_845G(dev) || IS_I865G(dev)) + i845_update_cursor(crtc, base); + else + i9xx_update_cursor(crtc, base); + } +} + +static int intel_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, + uint32_t handle, + uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj; + uint32_t addr; + int ret; + + /* if we want to turn off the cursor ignore width and height */ + if (!handle) { + DRM_DEBUG_KMS("cursor off\n"); + addr = 0; + obj = NULL; + mutex_lock(&dev->struct_mutex); + goto finish; + } + + /* Currently we only support 64x64 cursors */ + if (width != 64 || height != 64) { + DRM_ERROR("we currently only support 64x64 cursors\n"); + return -EINVAL; + } + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + if (&obj->base == NULL) + return -ENOENT; + + if (obj->base.size < width * height * 4) { + DRM_ERROR("buffer is to small\n"); + ret = -ENOMEM; + goto fail; + } + + /* we only need to pin inside GTT if cursor is non-phy */ + mutex_lock(&dev->struct_mutex); + if (!dev_priv->info->cursor_needs_physical) { + unsigned alignment; + + if (obj->tiling_mode) { + DRM_ERROR("cursor cannot be tiled\n"); + ret = -EINVAL; + goto fail_locked; + } + + /* Note that the w/a also requires 2 PTE of padding following + * the bo. We currently fill all unused PTE with the shadow + * page and so we should always have valid PTE following the + * cursor preventing the VT-d warning. + */ + alignment = 0; + if (need_vtd_wa(dev)) + alignment = 64*1024; + + ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); + if (ret) { + DRM_ERROR("failed to move cursor bo into the GTT\n"); + goto fail_locked; + } + + ret = i915_gem_object_put_fence(obj); + if (ret) { + DRM_ERROR("failed to release fence for cursor"); + goto fail_unpin; + } + + addr = obj->gtt_offset; + } else { + int align = IS_I830(dev) ? 16 * 1024 : 256; + ret = i915_gem_attach_phys_object(dev, obj, + (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, + align); + if (ret) { + DRM_ERROR("failed to attach phys object\n"); + goto fail_locked; + } + addr = obj->phys_obj->handle->busaddr; + } + + if (IS_GEN2(dev)) + I915_WRITE(CURSIZE, (height << 12) | width); + + finish: + if (intel_crtc->cursor_bo) { + if (dev_priv->info->cursor_needs_physical) { + if (intel_crtc->cursor_bo != obj) + i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); + } else + i915_gem_object_unpin(intel_crtc->cursor_bo); + drm_gem_object_unreference(&intel_crtc->cursor_bo->base); + } + + mutex_unlock(&dev->struct_mutex); + + intel_crtc->cursor_addr = addr; + intel_crtc->cursor_bo = obj; + intel_crtc->cursor_width = width; + intel_crtc->cursor_height = height; + + intel_crtc_update_cursor(crtc, true); + + return 0; +fail_unpin: + i915_gem_object_unpin(obj); +fail_locked: + mutex_unlock(&dev->struct_mutex); +fail: + drm_gem_object_unreference_unlocked(&obj->base); + return ret; +} + +static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->cursor_x = x; + intel_crtc->cursor_y = y; + + intel_crtc_update_cursor(crtc, true); + + return 0; +} + +/** Sets the color ramps on behalf of RandR */ +void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->lut_r[regno] = red >> 8; + intel_crtc->lut_g[regno] = green >> 8; + intel_crtc->lut_b[regno] = blue >> 8; +} + +void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + *red = intel_crtc->lut_r[regno] << 8; + *green = intel_crtc->lut_g[regno] << 8; + *blue = intel_crtc->lut_b[regno] << 8; +} + +static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t start, uint32_t size) +{ + int end = (start + size > 256) ? 
256 : start + size, i; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + for (i = start; i < end; i++) { + intel_crtc->lut_r[i] = red[i] >> 8; + intel_crtc->lut_g[i] = green[i] >> 8; + intel_crtc->lut_b[i] = blue[i] >> 8; + } + + intel_crtc_load_lut(crtc); +} + +/* VESA 640x480x72Hz mode to set on the pipe */ +static struct drm_display_mode load_detect_mode = { + DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, + 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), +}; + +static struct drm_framebuffer * +intel_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) +{ + struct intel_framebuffer *intel_fb; + int ret; + + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) { + drm_gem_object_unreference_unlocked(&obj->base); + return ERR_PTR(-ENOMEM); + } + + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); + if (ret) { + drm_gem_object_unreference_unlocked(&obj->base); + kfree(intel_fb); + return ERR_PTR(ret); + } + + return &intel_fb->base; +} + +static u32 +intel_framebuffer_pitch_for_width(int width, int bpp) +{ + u32 pitch = DIV_ROUND_UP(width * bpp, 8); + return ALIGN(pitch, 64); +} + +static u32 +intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) +{ + u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); + return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); +} + +static struct drm_framebuffer * +intel_framebuffer_create_for_mode(struct drm_device *dev, + struct drm_display_mode *mode, + int depth, int bpp) +{ + struct drm_i915_gem_object *obj; + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + + obj = i915_gem_alloc_object(dev, + intel_framebuffer_size_for_mode(mode, bpp)); + if (obj == NULL) + return ERR_PTR(-ENOMEM); + + mode_cmd.width = mode->hdisplay; + mode_cmd.height = mode->vdisplay; + mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, + bpp); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); + + return intel_framebuffer_create(dev, &mode_cmd, obj); +} + +static struct drm_framebuffer * +mode_fits_in_fbdev(struct drm_device *dev, + struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + struct drm_framebuffer *fb; + + if (dev_priv->fbdev == NULL) + return NULL; + + obj = dev_priv->fbdev->ifb.obj; + if (obj == NULL) + return NULL; + + fb = &dev_priv->fbdev->ifb.base; + if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, + fb->bits_per_pixel)) + return NULL; + + if (obj->base.size < mode->vdisplay * fb->pitches[0]) + return NULL; + + return fb; +} + +bool intel_get_load_detect_pipe(struct drm_connector *connector, + struct drm_display_mode *mode, + struct intel_load_detect_pipe *old) +{ + struct intel_crtc *intel_crtc; + struct intel_encoder *intel_encoder = + intel_attached_encoder(connector); + struct drm_crtc *possible_crtc; + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_crtc *crtc = NULL; + struct drm_device *dev = encoder->dev; + struct drm_framebuffer *fb; + int i = -1; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector), + encoder->base.id, drm_get_encoder_name(encoder)); + + /* + * Algorithm gets a little messy: + * + * - if the connector already has an assigned crtc, use it (but make + * sure it's on first) + * + * - try to find the first unused crtc that can drive this connector, + * and use that if we find 
one + */ + + /* See if we already have a CRTC for this connector */ + if (encoder->crtc) { + crtc = encoder->crtc; + + mutex_lock(&crtc->mutex); + + old->dpms_mode = connector->dpms; + old->load_detect_temp = false; + + /* Make sure the crtc and connector are running */ + if (connector->dpms != DRM_MODE_DPMS_ON) + connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); + + return true; + } + + /* Find an unused one (if possible) */ + list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { + i++; + if (!(encoder->possible_crtcs & (1 << i))) + continue; + if (!possible_crtc->enabled) { + crtc = possible_crtc; + break; + } + } + + /* + * If we didn't find an unused CRTC, don't use any. + */ + if (!crtc) { + DRM_DEBUG_KMS("no pipe available for load-detect\n"); + return false; + } + + mutex_lock(&crtc->mutex); + intel_encoder->new_crtc = to_intel_crtc(crtc); + to_intel_connector(connector)->new_encoder = intel_encoder; + + intel_crtc = to_intel_crtc(crtc); + old->dpms_mode = connector->dpms; + old->load_detect_temp = true; + old->release_fb = NULL; + + if (!mode) + mode = &load_detect_mode; + + /* We need a framebuffer large enough to accommodate all accesses + * that the plane may generate whilst we perform load detection. + * We can not rely on the fbcon either being present (we get called + * during its initialisation to detect all boot displays, or it may + * not even exist) or that it is large enough to satisfy the + * requested mode. + */ + fb = mode_fits_in_fbdev(dev, mode); + if (fb == NULL) { + DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); + fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); + old->release_fb = fb; + } else + DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); + mutex_unlock(&crtc->mutex); + return false; + } + + if (intel_set_mode(crtc, mode, 0, 0, fb)) { + DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); + if (old->release_fb) + old->release_fb->funcs->destroy(old->release_fb); + mutex_unlock(&crtc->mutex); + return false; + } + + /* let the connector get through one full cycle before testing */ + intel_wait_for_vblank(dev, intel_crtc->pipe); + return true; +} + +void intel_release_load_detect_pipe(struct drm_connector *connector, + struct intel_load_detect_pipe *old) +{ + struct intel_encoder *intel_encoder = + intel_attached_encoder(connector); + struct drm_encoder *encoder = &intel_encoder->base; + struct drm_crtc *crtc = encoder->crtc; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector), + encoder->base.id, drm_get_encoder_name(encoder)); + + if (old->load_detect_temp) { + to_intel_connector(connector)->new_encoder = NULL; + intel_encoder->new_crtc = NULL; + intel_set_mode(crtc, NULL, 0, 0, NULL); + + if (old->release_fb) { + drm_framebuffer_unregister_private(old->release_fb); + drm_framebuffer_unreference(old->release_fb); + } + + mutex_unlock(&crtc->mutex); + return; + } + + /* Switch crtc and encoder back off if necessary */ + if (old->dpms_mode != DRM_MODE_DPMS_ON) + connector->funcs->dpms(connector, old->dpms_mode); + + mutex_unlock(&crtc->mutex); +} + +/* Returns the clock of the currently programmed mode of the given pipe. 
*/ +static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll = I915_READ(DPLL(pipe)); + u32 fp; + intel_clock_t clock; + + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) + fp = I915_READ(FP0(pipe)); + else + fp = I915_READ(FP1(pipe)); + + clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; + if (IS_PINEVIEW(dev)) { + clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; + clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; + } else { + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; + } + + if (!IS_GEN2(dev)) { + if (IS_PINEVIEW(dev)) + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> + DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); + else + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> + DPLL_FPA01_P1_POST_DIV_SHIFT); + + switch (dpll & DPLL_MODE_MASK) { + case DPLLB_MODE_DAC_SERIAL: + clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? + 5 : 10; + break; + case DPLLB_MODE_LVDS: + clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? + 7 : 14; + break; + default: + DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " + "mode\n", (int)(dpll & DPLL_MODE_MASK)); + return 0; + } + + /* XXX: Handle the 100Mhz refclk */ + intel_clock(dev, 96000, &clock); + } else { + bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); + + if (is_lvds) { + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> + DPLL_FPA01_P1_POST_DIV_SHIFT); + clock.p2 = 14; + + if ((dpll & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + /* XXX: might not be 66MHz */ + intel_clock(dev, 66000, &clock); + } else + intel_clock(dev, 48000, &clock); + } else { + if (dpll & PLL_P1_DIVIDE_BY_TWO) + clock.p1 = 2; + else { + clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> + DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; + } + if (dpll & PLL_P2_DIVIDE_BY_4) + clock.p2 = 4; + else + clock.p2 = 2; + + intel_clock(dev, 48000, &clock); + } + } + + /* XXX: It would be nice to validate the clocks, but we can't reuse + * i830PllIsValid() because it relies on the xf86_config connector + * configuration being accurate, which it isn't necessarily. + */ + + return clock.dot; +} + +/** Returns the currently programmed mode of the given pipe. 
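+ * The timings are read back from the HTOTAL/HSYNC/VTOTAL/VSYNC
+ * registers, which store each value minus one; hence the + 1
+ * adjustments below.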
*/ +struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + struct drm_display_mode *mode; + int htot = I915_READ(HTOTAL(cpu_transcoder)); + int hsync = I915_READ(HSYNC(cpu_transcoder)); + int vtot = I915_READ(VTOTAL(cpu_transcoder)); + int vsync = I915_READ(VSYNC(cpu_transcoder)); + + mode = kzalloc(sizeof(*mode), GFP_KERNEL); + if (!mode) + return NULL; + + mode->clock = intel_crtc_clock_get(dev, crtc); + mode->hdisplay = (htot & 0xffff) + 1; + mode->htotal = ((htot & 0xffff0000) >> 16) + 1; + mode->hsync_start = (hsync & 0xffff) + 1; + mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; + mode->vdisplay = (vtot & 0xffff) + 1; + mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; + mode->vsync_start = (vsync & 0xffff) + 1; + mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; + + drm_mode_set_name(mode); + + return mode; +} + +static void intel_increase_pllclock(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int dpll_reg = DPLL(pipe); + int dpll; + + if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) + return; + + dpll = I915_READ(dpll_reg); + if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { + DRM_DEBUG_DRIVER("upclocking LVDS\n"); + + assert_panel_unlocked(dev_priv, pipe); + + dpll &= ~DISPLAY_RATE_SELECT_FPA1; + I915_WRITE(dpll_reg, dpll); + intel_wait_for_vblank(dev, pipe); + + dpll = I915_READ(dpll_reg); + if (dpll & DISPLAY_RATE_SELECT_FPA1) + DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); + } +} + +static void intel_decrease_pllclock(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + if (HAS_PCH_SPLIT(dev)) + return; + + if (!dev_priv->lvds_downclock_avail) + return; + + /* + * Since this is called by a timer, we should never get here in + * the manual case. 
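+ * Downclocking just sets DISPLAY_RATE_SELECT_FPA1 so the DPLL
+ * switches to the FP1 divisors, which were programmed with the
+ * reduced LVDS clock at mode-set time (lowfreq_avail).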
+ */ + if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { + int pipe = intel_crtc->pipe; + int dpll_reg = DPLL(pipe); + int dpll; + + DRM_DEBUG_DRIVER("downclocking LVDS\n"); + + assert_panel_unlocked(dev_priv, pipe); + + dpll = I915_READ(dpll_reg); + dpll |= DISPLAY_RATE_SELECT_FPA1; + I915_WRITE(dpll_reg, dpll); + intel_wait_for_vblank(dev, pipe); + dpll = I915_READ(dpll_reg); + if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) + DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); + } + +} + +void intel_mark_busy(struct drm_device *dev) +{ + i915_update_gfx_val(dev->dev_private); +} + +void intel_mark_idle(struct drm_device *dev) +{ + struct drm_crtc *crtc; + + if (!i915_powersave) + return; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (!crtc->fb) + continue; + + intel_decrease_pllclock(crtc); + } +} + +void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_crtc *crtc; + + if (!i915_powersave) + return; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (!crtc->fb) + continue; + + if (to_intel_framebuffer(crtc->fb)->obj == obj) + intel_increase_pllclock(crtc); + } +} + +static void intel_crtc_destroy(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct intel_unpin_work *work; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (work) { + cancel_work_sync(&work->work); + kfree(work); + } + + drm_crtc_cleanup(crtc); + + kfree(intel_crtc); +} + +static void intel_unpin_work_fn(struct work_struct *__work) +{ + struct intel_unpin_work *work = + container_of(__work, struct intel_unpin_work, work); + struct drm_device *dev = work->crtc->dev; + + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(work->old_fb_obj); + drm_gem_object_unreference(&work->pending_flip_obj->base); + drm_gem_object_unreference(&work->old_fb_obj->base); + + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); + + BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); + atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); + + kfree(work); +} + +static void do_intel_finish_page_flip(struct drm_device *dev, + struct drm_crtc *crtc) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_unpin_work *work; + unsigned long flags; + + /* Ignore early vblank irqs */ + if (intel_crtc == NULL) + return; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + + /* Ensure we don't miss a work->pending update ... */ + smp_rmb(); + + if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { + spin_unlock_irqrestore(&dev->event_lock, flags); + return; + } + + /* and that the unpin work is consistent wrt ->pending. 
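/*
 * [Illustrative sketch, not part of the patch] The ->pending dance above is a
 * publish/consume pattern: the queueing side publishes the flip work and then
 * raises the marker, the irq side reads the marker before it trusts the rest
 * of the work item. A userspace analogue with C11 atomics (release/acquire
 * standing in, approximately, for the smp_wmb()/smp_rmb() pairs); names and
 * values are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_FLIP_PENDING  1
#define SKETCH_FLIP_COMPLETE 2

struct sketch_flip { atomic_int pending; void *payload; };

static void sketch_mark_active(struct sketch_flip *f)	/* queueing side */
{
	/* payload writes must be visible before the marker flips */
	atomic_store_explicit(&f->pending, SKETCH_FLIP_PENDING,
			      memory_order_release);
}

static bool sketch_try_finish(struct sketch_flip *f)	/* irq side */
{
	/* acquire pairs with the release above */
	return atomic_load_explicit(&f->pending, memory_order_acquire)
		>= SKETCH_FLIP_COMPLETE;
}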
*/ + smp_rmb(); + + intel_crtc->unpin_work = NULL; + + if (work->event) + drm_send_vblank_event(dev, intel_crtc->pipe, work->event); + + drm_vblank_put(dev, intel_crtc->pipe); + + spin_unlock_irqrestore(&dev->event_lock, flags); + + wake_up_all(&dev_priv->pending_flip_queue); + + queue_work(dev_priv->wq, &work->work); + + trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); +} + +void intel_finish_page_flip(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + + do_intel_finish_page_flip(dev, crtc); +} + +void intel_finish_page_flip_plane(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; + + do_intel_finish_page_flip(dev, crtc); +} + +void intel_prepare_page_flip(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + unsigned long flags; + + /* NB: An MMIO update of the plane base pointer will also + * generate a page-flip completion irq, i.e. every modeset + * is also accompanied by a spurious intel_prepare_page_flip(). + */ + spin_lock_irqsave(&dev->event_lock, flags); + if (intel_crtc->unpin_work) + atomic_inc_not_zero(&intel_crtc->unpin_work->pending); + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) +{ + /* Ensure that the work item is consistent when activating it ... */ + smp_wmb(); + atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); + /* and that it is marked active as soon as the irq could fire. */ + smp_wmb(); +} + +static int intel_gen2_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + u32 flip_mask; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + ret = intel_ring_begin(ring, 6); + if (ret) + goto err_unpin; + + /* Can't queue multiple flips, so wait for the previous + * one to finish before executing the next. 
+ */ + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, 0); /* aux display base address, unused */ + + intel_mark_page_flip_active(intel_crtc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +static int intel_gen3_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + u32 flip_mask; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + ret = intel_ring_begin(ring, 6); + if (ret) + goto err_unpin; + + if (intel_crtc->plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, MI_NOOP); + + intel_mark_page_flip_active(intel_crtc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +static int intel_gen4_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + uint32_t pf, pipesrc; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + ret = intel_ring_begin(ring, 4); + if (ret) + goto err_unpin; + + /* i965+ uses the linear or tiled offsets from the + * Display Registers (which do not change across a page-flip) + * so we need only reprogram the base address. + */ + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, + (obj->gtt_offset + intel_crtc->dspaddr_offset) | + obj->tiling_mode); + + /* XXX Enabling the panel-fitter across page-flip is so far + * untested on non-native modes, so ignore it for now. + * pf = I915_READ(pipe == 0 ? 
PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; + */ + pf = 0; + pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; + intel_ring_emit(ring, pf | pipesrc); + + intel_mark_page_flip_active(intel_crtc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +static int intel_gen6_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + uint32_t pf, pipesrc; + int ret; + + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + ret = intel_ring_begin(ring, 4); + if (ret) + goto err_unpin; + + intel_ring_emit(ring, MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + + /* Contrary to the suggestions in the documentation, + * "Enable Panel Fitter" does not seem to be required when page + * flipping with a non-native mode, and worse causes a normal + * modeset to fail. + * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; + */ + pf = 0; + pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; + intel_ring_emit(ring, pf | pipesrc); + + intel_mark_page_flip_active(intel_crtc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +/* + * On gen7 we currently use the blit ring because (in early silicon at least) + * the render ring doesn't give us interrpts for page flip completion, which + * means clients will hang after the first flip is queued. Fortunately the + * blit ring generates interrupts properly, so use it instead. 
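/*
 * [Illustrative sketch, not part of the patch] Which intel_genN_queue_flip()
 * variant runs is decided once, in intel_init_display() later in this file,
 * by storing a function pointer in dev_priv->display.queue_flip. A minimal
 * model of that dispatch, with hypothetical types and a simplified choice:
 */
struct sketch_flip_ops { int (*queue_flip)(int plane); };

static int sketch_gen4_flip(int plane) { (void)plane; return 0; }	/* render ring */
static int sketch_gen7_flip(int plane) { (void)plane; return 0; }	/* blit ring   */

static void sketch_pick_flip_ops(struct sketch_flip_ops *ops, int gen)
{
	ops->queue_flip = (gen >= 7) ? sketch_gen7_flip : sketch_gen4_flip;
}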
+ */ +static int intel_gen7_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + uint32_t plane_bit = 0; + int ret; + + ret = intel_pin_and_fence_fb_obj(dev, obj, ring); + if (ret) + goto err; + + switch(intel_crtc->plane) { + case PLANE_A: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; + break; + case PLANE_B: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; + break; + case PLANE_C: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; + break; + default: + WARN_ONCE(1, "unknown plane in flip command\n"); + ret = -ENODEV; + goto err_unpin; + } + + ret = intel_ring_begin(ring, 4); + if (ret) + goto err_unpin; + + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, (MI_NOOP)); + + intel_mark_page_flip_active(intel_crtc); + intel_ring_advance(ring); + return 0; + +err_unpin: + intel_unpin_fb_obj(obj); +err: + return ret; +} + +static int intel_default_queue_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj) +{ + return -ENODEV; +} + +static int intel_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *old_fb = crtc->fb; + struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_unpin_work *work; + unsigned long flags; + int ret; + + /* Can't change pixel format via MI display flips. */ + if (fb->pixel_format != crtc->fb->pixel_format) + return -EINVAL; + + /* + * TILEOFF/LINOFF registers can't be changed via MI display flips. + * Note that pitch changes could also affect these register. + */ + if (INTEL_INFO(dev)->gen > 3 && + (fb->offsets[0] != crtc->fb->offsets[0] || + fb->pitches[0] != crtc->fb->pitches[0])) + return -EINVAL; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + work->event = event; + work->crtc = crtc; + work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; + INIT_WORK(&work->work, intel_unpin_work_fn); + + ret = drm_vblank_get(dev, intel_crtc->pipe); + if (ret) + goto free_work; + + /* We borrow the event spin lock for protecting unpin_work */ + spin_lock_irqsave(&dev->event_lock, flags); + if (intel_crtc->unpin_work) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(work); + drm_vblank_put(dev, intel_crtc->pipe); + + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + return -EBUSY; + } + intel_crtc->unpin_work = work; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (atomic_read(&intel_crtc->unpin_work_count) >= 2) + flush_workqueue(dev_priv->wq); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto cleanup; + + /* Reference the objects for the scheduled work. 
*/ + drm_gem_object_reference(&work->old_fb_obj->base); + drm_gem_object_reference(&obj->base); + + crtc->fb = fb; + + work->pending_flip_obj = obj; + + work->enable_stall_check = true; + + atomic_inc(&intel_crtc->unpin_work_count); + intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); + + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); + if (ret) + goto cleanup_pending; + + intel_disable_fbc(dev); + intel_mark_fb_busy(obj); + mutex_unlock(&dev->struct_mutex); + + trace_i915_flip_request(intel_crtc->plane, obj); + + return 0; + +cleanup_pending: + atomic_dec(&intel_crtc->unpin_work_count); + crtc->fb = old_fb; + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + +cleanup: + spin_lock_irqsave(&dev->event_lock, flags); + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + drm_vblank_put(dev, intel_crtc->pipe); +free_work: + kfree(work); + + return ret; +} + +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, +}; + +bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) +{ + struct intel_encoder *other_encoder; + struct drm_crtc *crtc = &encoder->new_crtc->base; + + if (WARN_ON(!crtc)) + return false; + + list_for_each_entry(other_encoder, + &crtc->dev->mode_config.encoder_list, + base.head) { + + if (&other_encoder->new_crtc->base != crtc || + encoder == other_encoder) + continue; + else + return true; + } + + return false; +} + +static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, + struct drm_crtc *crtc) +{ + struct drm_device *dev; + struct drm_crtc *tmp; + int crtc_mask = 1; + + WARN(!crtc, "checking null crtc?\n"); + + dev = crtc->dev; + + list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { + if (tmp == crtc) + break; + crtc_mask <<= 1; + } + + if (encoder->possible_crtcs & crtc_mask) + return true; + return false; +} + +/** + * intel_modeset_update_staged_output_state + * + * Updates the staged output configuration state, e.g. after we've read out the + * current hw state. + */ +static void intel_modeset_update_staged_output_state(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + connector->new_encoder = + to_intel_encoder(connector->base.encoder); + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + encoder->new_crtc = + to_intel_crtc(encoder->base.crtc); + } +} + +/** + * intel_modeset_commit_output_state + * + * This function copies the stage display pipe configuration to the real one. 
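/*
 * [Illustrative sketch, not part of the patch] The new_encoder/new_crtc
 * pointers staged above follow a two-phase pattern: the proposed routing is
 * collected and validated first, and only then copied over the live pointers
 * in one pass by the commit helper that follows. A generic model with
 * hypothetical types:
 */
struct sketch_links { int encoder_for_connector[4]; };

struct sketch_routing {
	struct sketch_links live;	/* what is driving the hardware now  */
	struct sketch_links staged;	/* what the pending modeset asks for */
};

static void sketch_stage(struct sketch_routing *r, int connector, int encoder)
{
	r->staged.encoder_for_connector[connector] = encoder;
}

static void sketch_commit(struct sketch_routing *r)
{
	/* only reached once the staged configuration has been validated */
	r->live = r->staged;
}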
+ */ +static void intel_modeset_commit_output_state(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + connector->base.encoder = &connector->new_encoder->base; + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + encoder->base.crtc = &encoder->new_crtc->base; + } +} + +static int +pipe_config_set_bpp(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + int bpp; + + switch (fb->pixel_format) { + case DRM_FORMAT_C8: + bpp = 8*3; /* since we go through a colormap */ + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + /* checked in intel_framebuffer_init already */ + if (WARN_ON(INTEL_INFO(dev)->gen > 3)) + return -EINVAL; + case DRM_FORMAT_RGB565: + bpp = 6*3; /* min is 18bpp */ + break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + /* checked in intel_framebuffer_init already */ + if (WARN_ON(INTEL_INFO(dev)->gen < 4)) + return -EINVAL; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + bpp = 8*3; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + /* checked in intel_framebuffer_init already */ + if (WARN_ON(INTEL_INFO(dev)->gen < 4)) + return -EINVAL; + bpp = 10*3; + break; + /* TODO: gen4+ supports 16 bpc floating point, too. */ + default: + DRM_DEBUG_KMS("unsupported depth\n"); + return -EINVAL; + } + + pipe_config->pipe_bpp = bpp; + + /* Clamp display bpp to EDID value */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + head) { + if (connector->encoder && connector->encoder->crtc != crtc) + continue; + + /* Don't use an invalid EDID bpc value */ + if (connector->display_info.bpc && + connector->display_info.bpc * 3 < bpp) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", + bpp, connector->display_info.bpc*3); + pipe_config->pipe_bpp = connector->display_info.bpc*3; + } + } + + return bpp; +} + +static struct intel_crtc_config * +intel_modeset_pipe_config(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_encoder_helper_funcs *encoder_funcs; + struct intel_encoder *encoder; + struct intel_crtc_config *pipe_config; + int plane_bpp; + + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); + if (!pipe_config) + return ERR_PTR(-ENOMEM); + + drm_mode_copy(&pipe_config->adjusted_mode, mode); + drm_mode_copy(&pipe_config->requested_mode, mode); + + plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config); + if (plane_bpp < 0) + goto fail; + + /* Pass our mode to the connectors and the CRTC to give them a chance to + * adjust it according to limitations or connector properties, and also + * a chance to reject the mode entirely. 
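/*
 * [Illustrative sketch, not part of the patch] pipe_config_set_bpp() above
 * picks a pipe depth from the framebuffer format and then clamps it to what
 * the sink's EDID advertises (pipe bpp = 3 * bpc). A condensed model of that
 * policy; format names and values are made up.
 */
enum sketch_fmt { SKETCH_RGB565, SKETCH_XRGB8888, SKETCH_XRGB2101010 };

static int sketch_pick_pipe_bpp(enum sketch_fmt f, int edid_bpc)
{
	int bpp;

	switch (f) {
	case SKETCH_RGB565:		bpp = 6 * 3; break;	/* 18 bpp minimum */
	case SKETCH_XRGB8888:		bpp = 8 * 3; break;
	case SKETCH_XRGB2101010:	bpp = 10 * 3; break;
	default:			return -1;		/* unsupported */
	}

	if (edid_bpc && edid_bpc * 3 < bpp)
		bpp = edid_bpc * 3;				/* clamp to the sink */

	return bpp;
}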
+ */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + + if (&encoder->new_crtc->base != crtc) + continue; + + if (encoder->compute_config) { + if (!(encoder->compute_config(encoder, pipe_config))) { + DRM_DEBUG_KMS("Encoder config failure\n"); + goto fail; + } + + continue; + } + + encoder_funcs = encoder->base.helper_private; + if (!(encoder_funcs->mode_fixup(&encoder->base, + &pipe_config->requested_mode, + &pipe_config->adjusted_mode))) { + DRM_DEBUG_KMS("Encoder fixup failed\n"); + goto fail; + } + } + + if (!(intel_crtc_compute_config(crtc, pipe_config))) { + DRM_DEBUG_KMS("CRTC fixup failed\n"); + goto fail; + } + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + + pipe_config->dither = pipe_config->pipe_bpp != plane_bpp; + DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", + plane_bpp, pipe_config->pipe_bpp, pipe_config->dither); + + return pipe_config; +fail: + kfree(pipe_config); + return ERR_PTR(-EINVAL); +} + +/* Computes which crtcs are affected and sets the relevant bits in the mask. For + * simplicity we use the crtc's pipe number (because it's easier to obtain). */ +static void +intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, + unsigned *prepare_pipes, unsigned *disable_pipes) +{ + struct intel_crtc *intel_crtc; + struct drm_device *dev = crtc->dev; + struct intel_encoder *encoder; + struct intel_connector *connector; + struct drm_crtc *tmp_crtc; + + *disable_pipes = *modeset_pipes = *prepare_pipes = 0; + + /* Check which crtcs have changed outputs connected to them, these need + * to be part of the prepare_pipes mask. We don't (yet) support global + * modeset across multiple crtcs, so modeset_pipes will only have one + * bit set at most. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->base.encoder == &connector->new_encoder->base) + continue; + + if (connector->base.encoder) { + tmp_crtc = connector->base.encoder->crtc; + + *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; + } + + if (connector->new_encoder) + *prepare_pipes |= + 1 << connector->new_encoder->new_crtc->pipe; + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->base.crtc == &encoder->new_crtc->base) + continue; + + if (encoder->base.crtc) { + tmp_crtc = encoder->base.crtc; + + *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; + } + + if (encoder->new_crtc) + *prepare_pipes |= 1 << encoder->new_crtc->pipe; + } + + /* Check for any pipes that will be fully disabled ... */ + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, + base.head) { + bool used = false; + + /* Don't try to disable disabled crtcs. */ + if (!intel_crtc->base.enabled) + continue; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->new_crtc == intel_crtc) + used = true; + } + + if (!used) + *disable_pipes |= 1 << intel_crtc->pipe; + } + + + /* set_mode is also used to update properties on life display pipes. */ + intel_crtc = to_intel_crtc(crtc); + if (crtc->enabled) + *prepare_pipes |= 1 << intel_crtc->pipe; + + /* + * For simplicity do a full modeset on any pipe where the output routing + * changed. We could be more clever, but that would require us to be + * more careful with calling the relevant encoder->mode_set functions. + */ + if (*prepare_pipes) + *modeset_pipes = *prepare_pipes; + + /* ... and mask these out. 
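/*
 * [Illustrative sketch, not part of the patch] The three values are plain
 * per-pipe bitmasks. With made-up numbers, the masking performed just below
 * works out like this:
 */
static void sketch_pipe_masks(void)
{
	unsigned prepare = (1 << 0) | (1 << 1);	/* pipes A and B are touched    */
	unsigned modeset = prepare;		/* full-modeset candidates      */
	unsigned disable = (1 << 1);		/* pipe B loses all its outputs */
	unsigned this_crtc = 1 << 0;		/* the crtc passed by userspace */

	modeset &= ~disable;	/* don't modeset pipes that get disabled   */
	prepare &= ~disable;
	modeset &= this_crtc;	/* the single-crtc limitation noted above  */
	prepare &= this_crtc;

	/* result: modeset == prepare == (1 << 0), disable == (1 << 1) */
	(void)modeset; (void)prepare; (void)disable;
}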
*/ + *modeset_pipes &= ~(*disable_pipes); + *prepare_pipes &= ~(*disable_pipes); + + /* + * HACK: We don't (yet) fully support global modesets. intel_set_config + * obies this rule, but the modeset restore mode of + * intel_modeset_setup_hw_state does not. + */ + *modeset_pipes &= 1 << intel_crtc->pipe; + *prepare_pipes &= 1 << intel_crtc->pipe; +} + +static bool intel_crtc_in_use(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + struct drm_device *dev = crtc->dev; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) + if (encoder->crtc == crtc) + return true; + + return false; +} + +static void +intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) +{ + struct intel_encoder *intel_encoder; + struct intel_crtc *intel_crtc; + struct drm_connector *connector; + + list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, + base.head) { + if (!intel_encoder->base.crtc) + continue; + + intel_crtc = to_intel_crtc(intel_encoder->base.crtc); + + if (prepare_pipes & (1 << intel_crtc->pipe)) + intel_encoder->connectors_active = false; + } + + intel_modeset_commit_output_state(dev); + + /* Update computed state. */ + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, + base.head) { + intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->encoder || !connector->encoder->crtc) + continue; + + intel_crtc = to_intel_crtc(connector->encoder->crtc); + + if (prepare_pipes & (1 << intel_crtc->pipe)) { + struct drm_property *dpms_property = + dev->mode_config.dpms_property; + + connector->dpms = DRM_MODE_DPMS_ON; + drm_object_property_set_value(&connector->base, + dpms_property, + DRM_MODE_DPMS_ON); + + intel_encoder = to_intel_encoder(connector->encoder); + intel_encoder->connectors_active = true; + } + } + +} + +#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ + list_for_each_entry((intel_crtc), \ + &(dev)->mode_config.crtc_list, \ + base.head) \ + if (mask & (1 <<(intel_crtc)->pipe)) \ + +static bool +intel_pipe_config_compare(struct intel_crtc_config *current_config, + struct intel_crtc_config *pipe_config) +{ + if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) { + DRM_ERROR("mismatch in has_pch_encoder " + "(expected %i, found %i)\n", + current_config->has_pch_encoder, + pipe_config->has_pch_encoder); + return false; + } + + return true; +} + +void +intel_modeset_check_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + struct intel_connector *connector; + struct intel_crtc_config pipe_config; + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + /* This also checks the encoder/connector hw state with the + * ->get_hw_state callbacks. 
*/ + intel_connector_check_state(connector); + + WARN(&connector->new_encoder->base != connector->base.encoder, + "connector's staged encoder doesn't match current encoder\n"); + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + bool enabled = false; + bool active = false; + enum pipe pipe, tracked_pipe; + + DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + + WARN(&encoder->new_crtc->base != encoder->base.crtc, + "encoder's stage crtc doesn't match current crtc\n"); + WARN(encoder->connectors_active && !encoder->base.crtc, + "encoder's active_connectors set, but no crtc\n"); + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->base.encoder != &encoder->base) + continue; + enabled = true; + if (connector->base.dpms != DRM_MODE_DPMS_OFF) + active = true; + } + WARN(!!encoder->base.crtc != enabled, + "encoder's enabled state mismatch " + "(expected %i, found %i)\n", + !!encoder->base.crtc, enabled); + WARN(active && !encoder->base.crtc, + "active encoder with no crtc\n"); + + WARN(encoder->connectors_active != active, + "encoder's computed active state doesn't match tracked active state " + "(expected %i, found %i)\n", active, encoder->connectors_active); + + active = encoder->get_hw_state(encoder, &pipe); + WARN(active != encoder->connectors_active, + "encoder's hw state doesn't match sw tracking " + "(expected %i, found %i)\n", + encoder->connectors_active, active); + + if (!encoder->base.crtc) + continue; + + tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; + WARN(active && pipe != tracked_pipe, + "active encoder's pipe doesn't match" + "(expected %i, found %i)\n", + tracked_pipe, pipe); + + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + bool enabled = false; + bool active = false; + + DRM_DEBUG_KMS("[CRTC:%d]\n", + crtc->base.base.id); + + WARN(crtc->active && !crtc->base.enabled, + "active crtc, but not enabled in sw tracking\n"); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->base.crtc != &crtc->base) + continue; + enabled = true; + if (encoder->connectors_active) + active = true; + } + WARN(active != crtc->active, + "crtc's computed active state doesn't match tracked active state " + "(expected %i, found %i)\n", active, crtc->active); + WARN(enabled != crtc->base.enabled, + "crtc's computed enabled state doesn't match tracked enabled state " + "(expected %i, found %i)\n", enabled, crtc->base.enabled); + + memset(&pipe_config, 0, sizeof(pipe_config)); + active = dev_priv->display.get_pipe_config(crtc, + &pipe_config); + + /* hw state is inconsistent with the pipe A quirk */ + if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) + active = crtc->active; + + WARN(crtc->active != active, + "crtc active state doesn't match with hw state " + "(expected %i, found %i)\n", crtc->active, active); + + WARN(active && + !intel_pipe_config_compare(&crtc->config, &pipe_config), + "pipe state doesn't match!\n"); + } +} + +static int __intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_display_mode *saved_mode, *saved_hwmode; + struct intel_crtc_config *pipe_config = NULL; + struct intel_crtc *intel_crtc; + unsigned disable_pipes, prepare_pipes, modeset_pipes; + int ret = 0; + + saved_mode = kmalloc(2 * 
sizeof(*saved_mode), GFP_KERNEL); + if (!saved_mode) + return -ENOMEM; + saved_hwmode = saved_mode + 1; + + intel_modeset_affected_pipes(crtc, &modeset_pipes, + &prepare_pipes, &disable_pipes); + + *saved_hwmode = crtc->hwmode; + *saved_mode = crtc->mode; + + /* Hack: Because we don't (yet) support global modeset on multiple + * crtcs, we don't keep track of the new mode for more than one crtc. + * Hence simply check whether any bit is set in modeset_pipes in all the + * pieces of code that are not yet converted to deal with mutliple crtcs + * changing their mode at the same time. */ + if (modeset_pipes) { + pipe_config = intel_modeset_pipe_config(crtc, fb, mode); + if (IS_ERR(pipe_config)) { + ret = PTR_ERR(pipe_config); + pipe_config = NULL; + + goto out; + } + } + + DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", + modeset_pipes, prepare_pipes, disable_pipes); + + for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) + intel_crtc_disable(&intel_crtc->base); + + for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { + if (intel_crtc->base.enabled) + dev_priv->display.crtc_disable(&intel_crtc->base); + } + + /* crtc->mode is already used by the ->mode_set callbacks, hence we need + * to set it here already despite that we pass it down the callchain. + */ + if (modeset_pipes) { + enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder; + crtc->mode = *mode; + /* mode_set/enable/disable functions rely on a correct pipe + * config. */ + to_intel_crtc(crtc)->config = *pipe_config; + to_intel_crtc(crtc)->config.cpu_transcoder = tmp; + } + + /* Only after disabling all output pipelines that will be changed can we + * update the the output configuration. */ + intel_modeset_update_state(dev, prepare_pipes); + + if (dev_priv->display.modeset_global_resources) + dev_priv->display.modeset_global_resources(dev); + + /* Set up the DPLL and any encoders state that needs to adjust or depend + * on the DPLL. + */ + for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { + ret = intel_crtc_mode_set(&intel_crtc->base, + x, y, fb); + if (ret) + goto done; + } + + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ + for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) + dev_priv->display.crtc_enable(&intel_crtc->base); + + if (modeset_pipes) { + /* Store real post-adjustment hardware mode. */ + crtc->hwmode = pipe_config->adjusted_mode; + + /* Calculate and store various constants which + * are later needed by vblank and swap-completion + * timestamping. They are derived from true hwmode. 
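/*
 * [Illustrative sketch, not part of the patch] The constants computed by
 * drm_calc_timestamping_constants() boil down to line and frame durations
 * taken from the adjusted hardware mode. Back-of-envelope version with a
 * common 1920x1080@60 timing (clock in kHz):
 */
static void sketch_timestamp_constants(void)
{
	long long clock_khz = 148500, htotal = 2200, vtotal = 1125;

	long long line_ns  = htotal * 1000000LL / clock_khz;	/* ~14815 ns          */
	long long frame_ns = line_ns * vtotal;			/* ~16.67 ms -> 60 Hz */

	(void)line_ns; (void)frame_ns;
}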
+ */ + drm_calc_timestamping_constants(crtc); + } + + /* FIXME: add subpixel order */ +done: + if (ret && crtc->enabled) { + crtc->hwmode = *saved_hwmode; + crtc->mode = *saved_mode; + } + +out: + kfree(pipe_config); + kfree(saved_mode); + return ret; +} + +int intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) +{ + int ret; + + ret = __intel_set_mode(crtc, mode, x, y, fb); + + if (ret == 0) + intel_modeset_check_state(crtc->dev); + + return ret; +} + +void intel_crtc_restore_mode(struct drm_crtc *crtc) +{ + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); +} + +#undef for_each_intel_crtc_masked + +static void intel_set_config_free(struct intel_set_config *config) +{ + if (!config) + return; + + kfree(config->save_connector_encoders); + kfree(config->save_encoder_crtcs); + kfree(config); +} + +static int intel_set_config_save_state(struct drm_device *dev, + struct intel_set_config *config) +{ + struct drm_encoder *encoder; + struct drm_connector *connector; + int count; + + config->save_encoder_crtcs = + kcalloc(dev->mode_config.num_encoder, + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!config->save_encoder_crtcs) + return -ENOMEM; + + config->save_connector_encoders = + kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!config->save_connector_encoders) + return -ENOMEM; + + /* Copy data. Note that driver private data is not affected. + * Should anything bad happen only the expected state is + * restored, not the drivers personal bookkeeping. + */ + count = 0; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + config->save_encoder_crtcs[count++] = encoder->crtc; + } + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + config->save_connector_encoders[count++] = connector->encoder; + } + + return 0; +} + +static void intel_set_config_restore_state(struct drm_device *dev, + struct intel_set_config *config) +{ + struct intel_encoder *encoder; + struct intel_connector *connector; + int count; + + count = 0; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->new_crtc = + to_intel_crtc(config->save_encoder_crtcs[count++]); + } + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + connector->new_encoder = + to_intel_encoder(config->save_connector_encoders[count++]); + } +} + +static bool +is_crtc_connector_off(struct drm_mode_set *set) +{ + int i; + + if (set->num_connectors == 0) + return false; + + if (WARN_ON(set->connectors == NULL)) + return false; + + for (i = 0; i < set->num_connectors; i++) + if (set->connectors[i]->encoder && + set->connectors[i]->encoder->crtc == set->crtc && + set->connectors[i]->dpms != DRM_MODE_DPMS_ON) + return true; + + return false; +} + +static void +intel_set_config_compute_mode_changes(struct drm_mode_set *set, + struct intel_set_config *config) +{ + + /* We should be able to check here if the fb has the same properties + * and then just flip_or_move it */ + if (is_crtc_connector_off(set)) { + config->mode_changed = true; + } else if (set->crtc->fb != set->fb) { + /* If we have no fb then treat it as a full mode set */ + if (set->crtc->fb == NULL) { + DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); + config->mode_changed = true; + } else if (set->fb == NULL) { + config->mode_changed = true; + } else if (set->fb->pixel_format != + set->crtc->fb->pixel_format) { + config->mode_changed = true; + } else { + 
config->fb_changed = true; + } + } + + if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) + config->fb_changed = true; + + if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { + DRM_DEBUG_KMS("modes are different, full mode set\n"); + drm_mode_debug_printmodeline(&set->crtc->mode); + drm_mode_debug_printmodeline(set->mode); + config->mode_changed = true; + } +} + +static int +intel_modeset_stage_output_state(struct drm_device *dev, + struct drm_mode_set *set, + struct intel_set_config *config) +{ + struct drm_crtc *new_crtc; + struct intel_connector *connector; + struct intel_encoder *encoder; + int count, ro; + + /* The upper layers ensure that we either disable a crtc or have a list + * of connectors. For paranoia, double-check this. */ + WARN_ON(!set->fb && (set->num_connectors != 0)); + WARN_ON(set->fb && (set->num_connectors == 0)); + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + /* Otherwise traverse passed in connector list and get encoders + * for them. */ + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == &connector->base) { + connector->new_encoder = connector->encoder; + break; + } + } + + /* If we disable the crtc, disable all its connectors. Also, if + * the connector is on the changing crtc but not on the new + * connector list, disable it. */ + if ((!set->fb || ro == set->num_connectors) && + connector->base.encoder && + connector->base.encoder->crtc == set->crtc) { + connector->new_encoder = NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", + connector->base.base.id, + drm_get_connector_name(&connector->base)); + } + + + if (&connector->new_encoder->base != connector->base.encoder) { + DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + config->mode_changed = true; + } + } + /* connector->new_encoder is now updated for all connectors. */ + + /* Update crtc of enabled connectors. */ + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (!connector->new_encoder) + continue; + + new_crtc = connector->new_encoder->base.crtc; + + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == &connector->base) + new_crtc = set->crtc; + } + + /* Make sure the new CRTC will work with the encoder */ + if (!intel_encoder_crtc_ok(&connector->new_encoder->base, + new_crtc)) { + return -EINVAL; + } + connector->encoder->new_crtc = to_intel_crtc(new_crtc); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", + connector->base.base.id, + drm_get_connector_name(&connector->base), + new_crtc->base.id); + } + + /* Check for any encoders that needs to be disabled. */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->new_encoder == encoder) { + WARN_ON(!connector->new_encoder->new_crtc); + + goto next_encoder; + } + } + encoder->new_crtc = NULL; +next_encoder: + /* Only now check for crtc changes so we don't miss encoders + * that will be disabled. */ + if (&encoder->new_crtc->base != encoder->base.crtc) { + DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + config->mode_changed = true; + } + } + /* Now we've also updated encoder->new_crtc for all encoders. 
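/*
 * [Illustrative sketch, not part of the patch] The intel_encoder_crtc_ok()
 * check used while staging the routing reduces to a bitmask test: crtc N in
 * the mode_config list owns bit (1 << N), and the encoder advertises the
 * crtcs it can drive in possible_crtcs. Hypothetical values:
 */
#include <stdbool.h>

static bool sketch_encoder_can_use_crtc(unsigned possible_crtcs, int crtc_index)
{
	return (possible_crtcs & (1u << crtc_index)) != 0;
}

/* e.g. possible_crtcs == 0x5 means the encoder can sit on crtc 0 or crtc 2 */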
*/ + + return 0; +} + +static int intel_crtc_set_config(struct drm_mode_set *set) +{ + struct drm_device *dev; + struct drm_mode_set save_set; + struct intel_set_config *config; + int ret; + + BUG_ON(!set); + BUG_ON(!set->crtc); + BUG_ON(!set->crtc->helper_private); + + /* Enforce sane interface api - has been abused by the fb helper. */ + BUG_ON(!set->mode && set->fb); + BUG_ON(set->fb && set->num_connectors == 0); + + if (set->fb) { + DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->fb->base.id, + (int)set->num_connectors, set->x, set->y); + } else { + DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); + } + + dev = set->crtc->dev; + + ret = -ENOMEM; + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) + goto out_config; + + ret = intel_set_config_save_state(dev, config); + if (ret) + goto out_config; + + save_set.crtc = set->crtc; + save_set.mode = &set->crtc->mode; + save_set.x = set->crtc->x; + save_set.y = set->crtc->y; + save_set.fb = set->crtc->fb; + + /* Compute whether we need a full modeset, only an fb base update or no + * change at all. In the future we might also check whether only the + * mode changed, e.g. for LVDS where we only change the panel fitter in + * such cases. */ + intel_set_config_compute_mode_changes(set, config); + + ret = intel_modeset_stage_output_state(dev, set, config); + if (ret) + goto fail; + + if (config->mode_changed) { + if (set->mode) { + DRM_DEBUG_KMS("attempting to set mode from" + " userspace\n"); + drm_mode_debug_printmodeline(set->mode); + } + + ret = intel_set_mode(set->crtc, set->mode, + set->x, set->y, set->fb); + } else if (config->fb_changed) { + intel_crtc_wait_for_pending_flips(set->crtc); + + ret = intel_pipe_set_base(set->crtc, + set->x, set->y, set->fb); + } + + if (ret) { + DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", + set->crtc->base.id, ret); +fail: + intel_set_config_restore_state(dev, config); + + /* Try to restore the config */ + if (config->mode_changed && + intel_set_mode(save_set.crtc, save_set.mode, + save_set.x, save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + } + +out_config: + intel_set_config_free(config); + return ret; +} + +static const struct drm_crtc_funcs intel_crtc_funcs = { + .cursor_set = intel_crtc_cursor_set, + .cursor_move = intel_crtc_cursor_move, + .gamma_set = intel_crtc_gamma_set, + .set_config = intel_crtc_set_config, + .destroy = intel_crtc_destroy, + .page_flip = intel_crtc_page_flip, +}; + +static void intel_cpu_pll_init(struct drm_device *dev) +{ + if (HAS_DDI(dev)) + intel_ddi_pll_init(dev); +} + +static void intel_pch_pll_init(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + if (dev_priv->num_pch_pll == 0) { + DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n"); + return; + } + + for (i = 0; i < dev_priv->num_pch_pll; i++) { + dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i); + dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i); + dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i); + } +} + +static void intel_crtc_init(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc; + int i; + + intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (intel_crtc == NULL) + return; + + drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); + for (i = 0; i < 
256; i++) { + intel_crtc->lut_r[i] = i; + intel_crtc->lut_g[i] = i; + intel_crtc->lut_b[i] = i; + } + + /* Swap pipes & planes for FBC on pre-965 */ + intel_crtc->pipe = pipe; + intel_crtc->plane = pipe; + intel_crtc->config.cpu_transcoder = pipe; + if (IS_MOBILE(dev) && IS_GEN3(dev)) { + DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); + intel_crtc->plane = !pipe; + } + + BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; + + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); +} + +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; + struct drm_mode_object *drmmode_obj; + struct intel_crtc *crtc; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, + DRM_MODE_OBJECT_CRTC); + + if (!drmmode_obj) { + DRM_ERROR("no such CRTC id\n"); + return -EINVAL; + } + + crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + pipe_from_crtc_id->pipe = crtc->pipe; + + return 0; +} + +static int intel_encoder_clones(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_encoder *source_encoder; + int index_mask = 0; + int entry = 0; + + list_for_each_entry(source_encoder, + &dev->mode_config.encoder_list, base.head) { + + if (encoder == source_encoder) + index_mask |= (1 << entry); + + /* Intel hw has only one MUX where enocoders could be cloned. */ + if (encoder->cloneable && source_encoder->cloneable) + index_mask |= (1 << entry); + + entry++; + } + + return index_mask; +} + +static bool has_edp_a(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!IS_MOBILE(dev)) + return false; + + if ((I915_READ(DP_A) & DP_DETECTED) == 0) + return false; + + if (IS_GEN5(dev) && + (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) + return false; + + return true; +} + +static void intel_setup_outputs(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder; + bool dpd_is_edp = false; + bool has_lvds; + + has_lvds = intel_lvds_init(dev); + if (!has_lvds && !HAS_PCH_SPLIT(dev)) { + /* disable the panel fitter on everything but LVDS */ + I915_WRITE(PFIT_CONTROL, 0); + } + + if (!IS_ULT(dev)) + intel_crt_init(dev); + + if (HAS_DDI(dev)) { + int found; + + /* Haswell uses DDI functions to detect digital outputs */ + found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; + /* DDI A only supports eDP */ + if (found) + intel_ddi_init(dev, PORT_A); + + /* DDI B, C and D detection is indicated by the SFUSE_STRAP + * register */ + found = I915_READ(SFUSE_STRAP); + + if (found & SFUSE_STRAP_DDIB_DETECTED) + intel_ddi_init(dev, PORT_B); + if (found & SFUSE_STRAP_DDIC_DETECTED) + intel_ddi_init(dev, PORT_C); + if (found & SFUSE_STRAP_DDID_DETECTED) + intel_ddi_init(dev, PORT_D); + } else if (HAS_PCH_SPLIT(dev)) { + int found; + dpd_is_edp = intel_dpd_is_edp(dev); + + if (has_edp_a(dev)) + intel_dp_init(dev, DP_A, PORT_A); + + if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { + /* PCH SDVOB multiplex with HDMIB */ + found = intel_sdvo_init(dev, PCH_SDVOB, true); + if (!found) + intel_hdmi_init(dev, PCH_HDMIB, PORT_B); + if (!found && (I915_READ(PCH_DP_B) & 
DP_DETECTED)) + intel_dp_init(dev, PCH_DP_B, PORT_B); + } + + if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) + intel_hdmi_init(dev, PCH_HDMIC, PORT_C); + + if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) + intel_hdmi_init(dev, PCH_HDMID, PORT_D); + + if (I915_READ(PCH_DP_C) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_C, PORT_C); + + if (I915_READ(PCH_DP_D) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_D, PORT_D); + } else if (IS_VALLEYVIEW(dev)) { + /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ + if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); + + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { + intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, + PORT_B); + if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); + } + } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { + bool found = false; + + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOB\n"); + found = intel_sdvo_init(dev, GEN3_SDVOB, true); + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); + intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); + } + + if (!found && SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_B\n"); + intel_dp_init(dev, DP_B, PORT_B); + } + } + + /* Before G4X SDVOC doesn't have its own detect register */ + + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { + DRM_DEBUG_KMS("probing SDVOC\n"); + found = intel_sdvo_init(dev, GEN3_SDVOC, false); + } + + if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { + + if (SUPPORTS_INTEGRATED_HDMI(dev)) { + DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); + intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); + } + if (SUPPORTS_INTEGRATED_DP(dev)) { + DRM_DEBUG_KMS("probing DP_C\n"); + intel_dp_init(dev, DP_C, PORT_C); + } + } + + if (SUPPORTS_INTEGRATED_DP(dev) && + (I915_READ(DP_D) & DP_DETECTED)) { + DRM_DEBUG_KMS("probing DP_D\n"); + intel_dp_init(dev, DP_D, PORT_D); + } + } else if (IS_GEN2(dev)) + intel_dvo_init(dev); + + if (SUPPORTS_TV(dev)) + intel_tv_init(dev); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->base.possible_crtcs = encoder->crtc_mask; + encoder->base.possible_clones = + intel_encoder_clones(encoder); + } + + intel_init_pch_refclk(dev); + + drm_helper_move_panel_connectors_to_head(dev); +} + +static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + + drm_framebuffer_cleanup(fb); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); + + kfree(intel_fb); +} + +static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int *handle) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + + return drm_gem_handle_create(file, &obj->base, handle); +} + +static const struct drm_framebuffer_funcs intel_fb_funcs = { + .destroy = intel_user_framebuffer_destroy, + .create_handle = intel_user_framebuffer_create_handle, +}; + +int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *intel_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->tiling_mode == I915_TILING_Y) { + DRM_DEBUG("hardware does not support tiling Y\n"); + return -EINVAL; + } + + if (mode_cmd->pitches[0] & 63) { + DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", + 
mode_cmd->pitches[0]); + return -EINVAL; + } + + /* FIXME <= Gen4 stride limits are bit unclear */ + if (mode_cmd->pitches[0] > 32768) { + DRM_DEBUG("pitch (%d) must be at less than 32768\n", + mode_cmd->pitches[0]); + return -EINVAL; + } + + if (obj->tiling_mode != I915_TILING_NONE && + mode_cmd->pitches[0] != obj->stride) { + DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", + mode_cmd->pitches[0], obj->stride); + return -EINVAL; + } + + /* Reject formats not supported by any plane early. */ + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + if (INTEL_INFO(dev)->gen > 3) { + DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); + return -EINVAL; + } + break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + if (INTEL_INFO(dev)->gen < 4) { + DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); + return -EINVAL; + } + break; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + if (INTEL_INFO(dev)->gen < 5) { + DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); + return -EINVAL; + } + break; + default: + DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); + return -EINVAL; + } + + /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ + if (mode_cmd->offsets[0] != 0) + return -EINVAL; + + drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); + intel_fb->obj = obj; + + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; + } + + return 0; +} + +static struct drm_framebuffer * +intel_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(drm_gem_object_lookup(dev, filp, + mode_cmd->handles[0])); + if (&obj->base == NULL) + return ERR_PTR(-ENOENT); + + return intel_framebuffer_create(dev, mode_cmd, obj); +} + +static const struct drm_mode_config_funcs intel_mode_funcs = { + .fb_create = intel_user_framebuffer_create, + .output_poll_changed = intel_fb_output_poll_changed, +}; + +/* Set up chip specific display functions */ +static void intel_init_display(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_DDI(dev)) { + dev_priv->display.get_pipe_config = haswell_get_pipe_config; + dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; + dev_priv->display.crtc_enable = haswell_crtc_enable; + dev_priv->display.crtc_disable = haswell_crtc_disable; + dev_priv->display.off = haswell_crtc_off; + dev_priv->display.update_plane = ironlake_update_plane; + } else if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; + dev_priv->display.crtc_enable = ironlake_crtc_enable; + dev_priv->display.crtc_disable = ironlake_crtc_disable; + dev_priv->display.off = ironlake_crtc_off; + dev_priv->display.update_plane = ironlake_update_plane; + } else { + dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; + dev_priv->display.crtc_enable = i9xx_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; + 
dev_priv->display.off = i9xx_crtc_off; + dev_priv->display.update_plane = i9xx_update_plane; + } + + /* Returns the core display clock speed */ + if (IS_VALLEYVIEW(dev)) + dev_priv->display.get_display_clock_speed = + valleyview_get_display_clock_speed; + else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) + dev_priv->display.get_display_clock_speed = + i945_get_display_clock_speed; + else if (IS_I915G(dev)) + dev_priv->display.get_display_clock_speed = + i915_get_display_clock_speed; + else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) + dev_priv->display.get_display_clock_speed = + i9xx_misc_get_display_clock_speed; + else if (IS_I915GM(dev)) + dev_priv->display.get_display_clock_speed = + i915gm_get_display_clock_speed; + else if (IS_I865G(dev)) + dev_priv->display.get_display_clock_speed = + i865_get_display_clock_speed; + else if (IS_I85X(dev)) + dev_priv->display.get_display_clock_speed = + i855_get_display_clock_speed; + else /* 852, 830 */ + dev_priv->display.get_display_clock_speed = + i830_get_display_clock_speed; + + if (HAS_PCH_SPLIT(dev)) { + if (IS_GEN5(dev)) { + dev_priv->display.fdi_link_train = ironlake_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + } else if (IS_GEN6(dev)) { + dev_priv->display.fdi_link_train = gen6_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + } else if (IS_IVYBRIDGE(dev)) { + /* FIXME: detect B0+ stepping and use auto training */ + dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + dev_priv->display.modeset_global_resources = + ivb_modeset_global_resources; + } else if (IS_HASWELL(dev)) { + dev_priv->display.fdi_link_train = hsw_fdi_link_train; + dev_priv->display.write_eld = haswell_write_eld; + dev_priv->display.modeset_global_resources = + haswell_modeset_global_resources; + } + } else if (IS_G4X(dev)) { + dev_priv->display.write_eld = g4x_write_eld; + } + + /* Default just returns -ENODEV to indicate unsupported */ + dev_priv->display.queue_flip = intel_default_queue_flip; + + switch (INTEL_INFO(dev)->gen) { + case 2: + dev_priv->display.queue_flip = intel_gen2_queue_flip; + break; + + case 3: + dev_priv->display.queue_flip = intel_gen3_queue_flip; + break; + + case 4: + case 5: + dev_priv->display.queue_flip = intel_gen4_queue_flip; + break; + + case 6: + dev_priv->display.queue_flip = intel_gen6_queue_flip; + break; + case 7: + dev_priv->display.queue_flip = intel_gen7_queue_flip; + break; + } +} + +/* + * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, + * resume, or other times. This quirk makes sure that's the case for + * affected systems. + */ +static void quirk_pipea_force(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->quirks |= QUIRK_PIPEA_FORCE; + DRM_INFO("applying pipe a force quirk\n"); +} + +/* + * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason + */ +static void quirk_ssc_force_disable(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; + DRM_INFO("applying lvds SSC disable quirk\n"); +} + +/* + * A machine (e.g. 
Acer Aspire 5734Z) may need to invert the panel backlight + * brightness value + */ +static void quirk_invert_brightness(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; + DRM_INFO("applying inverted panel brightness quirk\n"); +} + +/* + * Some machines (Dell XPS13) suffer broken backlight controls if + * BLM_PCH_PWM_ENABLE is set. + */ +static void quirk_no_pcm_pwm_enable(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; + DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); +} + +struct intel_quirk { + int device; + int subsystem_vendor; + int subsystem_device; + void (*hook)(struct drm_device *dev); +}; + +/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ +struct intel_dmi_quirk { + void (*hook)(struct drm_device *dev); + const struct dmi_system_id (*dmi_id_list)[]; +}; + +static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) +{ + DRM_INFO("Backlight polarity reversed on %s\n", id->ident); + return 1; +} + +static const struct intel_dmi_quirk intel_dmi_quirks[] = { + { + .dmi_id_list = &(const struct dmi_system_id[]) { + { + .callback = intel_dmi_reverse_brightness, + .ident = "NCR Corporation", + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, ""), + }, + }, + { } /* terminating entry */ + }, + .hook = quirk_invert_brightness, + }, +}; + +static struct intel_quirk intel_quirks[] = { + /* HP Mini needs pipe A force quirk (LP: #322104) */ + { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, + + /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ + { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, + + /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ + { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, + + /* 830/845 need to leave pipe A & dpll A up */ + { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + + /* Lenovo U160 cannot use SSC on LVDS */ + { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, + + /* Sony Vaio Y cannot use SSC on LVDS */ + { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, + + /* Acer Aspire 5734Z must invert backlight brightness */ + { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, + + /* Acer/eMachines G725 */ + { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, + + /* Acer/eMachines e725 */ + { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, + + /* Acer/Packard Bell NCL20 */ + { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, + + /* Acer Aspire 4736Z */ + { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + + /* Dell XPS13 HD Sandy Bridge */ + { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, + /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ + { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, +}; + +static void intel_init_quirks(struct drm_device *dev) +{ + struct pci_dev *d = dev->pdev; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { + struct intel_quirk *q = &intel_quirks[i]; + + if (d->device == q->device && + (d->subsystem_vendor == q->subsystem_vendor || + q->subsystem_vendor == PCI_ANY_ID) && + (d->subsystem_device == q->subsystem_device || + q->subsystem_device == PCI_ANY_ID)) + q->hook(dev); + } + for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { + if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) + intel_dmi_quirks[i].hook(dev); + } +} + +/* Disable the VGA plane that we never use */ +static void 
i915_disable_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 sr1; + u32 vga_reg = i915_vgacntrl_reg(dev); + + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(SR01, VGA_SR_INDEX); + sr1 = inb(VGA_SR_DATA); + outb(sr1 | 1<<5, VGA_SR_DATA); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + udelay(300); + + I915_WRITE(vga_reg, VGA_DISP_DISABLE); + POSTING_READ(vga_reg); +} + +void intel_modeset_init_hw(struct drm_device *dev) +{ + intel_init_power_well(dev); + + intel_prepare_ddi(dev); + + intel_init_clock_gating(dev); + + mutex_lock(&dev->struct_mutex); + intel_enable_gt_powersave(dev); + mutex_unlock(&dev->struct_mutex); +} + +void intel_modeset_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i, j, ret; + + drm_mode_config_init(dev); + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + + dev->mode_config.funcs = &intel_mode_funcs; + + intel_init_quirks(dev); + + intel_init_pm(dev); + + if (INTEL_INFO(dev)->num_pipes == 0) + return; + + intel_init_display(dev); + + if (IS_GEN2(dev)) { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } else if (IS_GEN3(dev)) { + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; + } else { + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + } + dev->mode_config.fb_base = dev_priv->gtt.mappable_base; + + DRM_DEBUG_KMS("%d display pipe%s available.\n", + INTEL_INFO(dev)->num_pipes, + INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); + + for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + intel_crtc_init(dev, i); + for (j = 0; j < dev_priv->num_plane; j++) { + ret = intel_plane_init(dev, i, j); + if (ret) + DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n", + i, j, ret); + } + } + + intel_cpu_pll_init(dev); + intel_pch_pll_init(dev); + + /* Just disable it once at startup */ + i915_disable_vga(dev); + intel_setup_outputs(dev); + + /* Just in case the BIOS is doing something questionable. */ + intel_disable_fbc(dev); +} + +static void +intel_connector_break_all_links(struct intel_connector *connector) +{ + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + connector->encoder->connectors_active = false; + connector->encoder->base.crtc = NULL; +} + +static void intel_enable_pipe_a(struct drm_device *dev) +{ + struct intel_connector *connector; + struct drm_connector *crt = NULL; + struct intel_load_detect_pipe load_detect_temp; + + /* We can't just switch on the pipe A, we need to set things up with a + * proper mode and output configuration. As a gross hack, enable pipe A + * by enabling the load detect pipe once. 
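+ * Concretely, the list walk below picks the first analog (CRT) connector and
+ * briefly grabs and releases a load-detect pipe on it, which is enough to
+ * light up pipe A.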
*/ + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { + crt = &connector->base; + break; + } + } + + if (!crt) + return; + + if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) + intel_release_load_detect_pipe(crt, &load_detect_temp); + + +} + +static bool +intel_check_plane_mapping(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg, val; + + if (INTEL_INFO(dev)->num_pipes == 1) + return true; + + reg = DSPCNTR(!crtc->plane); + val = I915_READ(reg); + + if ((val & DISPLAY_PLANE_ENABLE) && + (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) + return false; + + return true; +} + +static void intel_sanitize_crtc(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + + /* Clear any frame start delays used for debugging left by the BIOS */ + reg = PIPECONF(crtc->config.cpu_transcoder); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + + /* We need to sanitize the plane -> pipe mapping first because this will + * disable the crtc (and hence change the state) if it is wrong. Note + * that gen4+ has a fixed plane -> pipe mapping. */ + if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { + struct intel_connector *connector; + bool plane; + + DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", + crtc->base.base.id); + + /* Pipe has the wrong plane attached and the plane is active. + * Temporarily change the plane mapping and disable everything + * ... */ + plane = crtc->plane; + crtc->plane = !plane; + dev_priv->display.crtc_disable(&crtc->base); + crtc->plane = plane; + + /* ... and break all links. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->encoder->base.crtc != &crtc->base) + continue; + + intel_connector_break_all_links(connector); + } + + WARN_ON(crtc->active); + crtc->base.enabled = false; + } + + if (dev_priv->quirks & QUIRK_PIPEA_FORCE && + crtc->pipe == PIPE_A && !crtc->active) { + /* BIOS forgot to enable pipe A, this mostly happens after + * resume. Force-enable the pipe to fix this, the update_dpms + * call below we restore the pipe to the right state, but leave + * the required bits on. */ + intel_enable_pipe_a(dev); + } + + /* Adjust the state of the output pipe according to whether we + * have active connectors/encoders. */ + intel_crtc_update_dpms(&crtc->base); + + if (crtc->active != crtc->base.enabled) { + struct intel_encoder *encoder; + + /* This can happen either due to bugs in the get_hw_state + * functions or because the pipe is force-enabled due to the + * pipe A quirk. */ + DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", + crtc->base.base.id, + crtc->base.enabled ? "enabled" : "disabled", + crtc->active ? "enabled" : "disabled"); + + crtc->base.enabled = crtc->active; + + /* Because we only establish the connector -> encoder -> + * crtc links if something is active, this means the + * crtc is now deactivated. Break the links. connector + * -> encoder links are only establish when things are + * actually up, hence no need to break them. 
*/ + WARN_ON(crtc->active); + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + WARN_ON(encoder->connectors_active); + encoder->base.crtc = NULL; + } + } +} + +static void intel_sanitize_encoder(struct intel_encoder *encoder) +{ + struct intel_connector *connector; + struct drm_device *dev = encoder->base.dev; + + /* We need to check both for a crtc link (meaning that the + * encoder is active and trying to read from a pipe) and the + * pipe itself being active. */ + bool has_active_crtc = encoder->base.crtc && + to_intel_crtc(encoder->base.crtc)->active; + + if (encoder->connectors_active && !has_active_crtc) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + + /* Connector is active, but has no active pipe. This is + * fallout from our resume register restoring. Disable + * the encoder manually again. */ + if (encoder->base.crtc) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + encoder->disable(encoder); + } + + /* Inconsistent output/port/pipe state happens presumably due to + * a bug in one of the get_hw_state functions. Or someplace else + * in our code, like the register restore mess on resume. Clamp + * things to off as a safer default. */ + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->encoder != encoder) + continue; + + intel_connector_break_all_links(connector); + } + } + /* Enabled encoders without active connectors will be fixed in + * the crtc fixup. */ +} + +void i915_redisable_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 vga_reg = i915_vgacntrl_reg(dev); + + if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { + DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); + i915_disable_vga(dev); + } +} + +/* Scan out the current hw modeset state, sanitizes it and maps it into the drm + * and i915 state tracking structures. 
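+ *
+ * Roughly: on DDI hardware the eDP transcoder is mapped to its pipe first,
+ * then pipe, PLL, encoder and connector state is read back, encoders and
+ * crtcs are sanitized, and finally the configuration is either force-restored
+ * or the staged output state is updated to match.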
*/ +void intel_modeset_setup_hw_state(struct drm_device *dev, + bool force_restore) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + u32 tmp; + struct drm_plane *plane; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + struct intel_connector *connector; + + if (HAS_DDI(dev)) { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + + if (tmp & TRANS_DDI_FUNC_ENABLE) { + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + case TRANS_DDI_EDP_INPUT_A_ONOFF: + pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + pipe = PIPE_C; + break; + default: + /* A bogus value has been programmed, disable + * the transcoder */ + WARN(1, "Bogus eDP source %08x\n", tmp); + intel_ddi_disable_transcoder_func(dev_priv, + TRANSCODER_EDP); + goto setup_pipes; + } + + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + crtc->config.cpu_transcoder = TRANSCODER_EDP; + + DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", + pipe_name(pipe)); + } + } + +setup_pipes: + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + enum transcoder tmp = crtc->config.cpu_transcoder; + memset(&crtc->config, 0, sizeof(crtc->config)); + crtc->config.cpu_transcoder = tmp; + + crtc->active = dev_priv->display.get_pipe_config(crtc, + &crtc->config); + + crtc->base.enabled = crtc->active; + + DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", + crtc->base.base.id, + crtc->active ? "enabled" : "disabled"); + } + + if (HAS_DDI(dev)) + intel_ddi_setup_hw_pll_state(dev); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + pipe = 0; + + if (encoder->get_hw_state(encoder, &pipe)) { + encoder->base.crtc = + dev_priv->pipe_to_crtc_mapping[pipe]; + } else { + encoder->base.crtc = NULL; + } + + encoder->connectors_active = false; + DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base), + encoder->base.crtc ? "enabled" : "disabled", + pipe); + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->get_hw_state(connector)) { + connector->base.dpms = DRM_MODE_DPMS_ON; + connector->encoder->connectors_active = true; + connector->base.encoder = &connector->encoder->base; + } else { + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", + connector->base.base.id, + drm_get_connector_name(&connector->base), + connector->base.encoder ? "enabled" : "disabled"); + } + + /* HW state is read out, now we need to sanitize this mess. */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + intel_sanitize_encoder(encoder); + } + + for_each_pipe(pipe) { + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + intel_sanitize_crtc(crtc); + } + + if (force_restore) { + /* + * We need to use raw interfaces for restoring state to avoid + * checking (bogus) intermediate states. 
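+ * (Hence the direct per-pipe __intel_set_mode() calls below rather than the
+ * higher-level modeset helpers.)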
+ */ + for_each_pipe(pipe) { + struct drm_crtc *crtc = + dev_priv->pipe_to_crtc_mapping[pipe]; + + __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, + crtc->fb); + } + list_for_each_entry(plane, &dev->mode_config.plane_list, head) + intel_plane_restore(plane); + + i915_redisable_vga(dev); + } else { + intel_modeset_update_staged_output_state(dev); + } + + intel_modeset_check_state(dev); + + drm_mode_config_reset(dev); +} + +void intel_modeset_gem_init(struct drm_device *dev) +{ + intel_modeset_init_hw(dev); + + intel_setup_overlay(dev); + + intel_modeset_setup_hw_state(dev, false); +} + +void intel_modeset_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + + drm_kms_helper_poll_fini(dev); + mutex_lock(&dev->struct_mutex); + + intel_unregister_dsm_handler(); + + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + /* Skip inactive CRTCs */ + if (!crtc->fb) + continue; + + intel_crtc = to_intel_crtc(crtc); + intel_increase_pllclock(crtc); + } + + intel_disable_fbc(dev); + + intel_disable_gt_powersave(dev); + + ironlake_teardown_rc6(dev); + + if (IS_VALLEYVIEW(dev)) + vlv_init_dpio(dev); + + mutex_unlock(&dev->struct_mutex); + + /* Disable the irq before mode object teardown, for the irq might + * enqueue unpin/hotplug work. */ + drm_irq_uninstall(dev); + cancel_work_sync(&dev_priv->hotplug_work); + cancel_work_sync(&dev_priv->rps.work); + + /* flush any delayed tasks or pending work */ + flush_scheduled_work(); + + /* destroy backlight, if any, before the connectors */ + intel_panel_destroy_backlight(dev); + + drm_mode_config_cleanup(dev); + + intel_cleanup_overlay(dev); +} + +/* + * Return which encoder is currently attached for connector. 
+ */ +struct drm_encoder *intel_best_encoder(struct drm_connector *connector) +{ + return &intel_attached_encoder(connector)->base; +} + +void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + connector->encoder = encoder; + drm_mode_connector_attach_encoder(&connector->base, + &encoder->base); +} + +/* + * set vga decode state - true == enable VGA decode + */ +int intel_modeset_vga_set_state(struct drm_device *dev, bool state) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 gmch_ctrl; + + pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); + if (state) + gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; + else + gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; + pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); + return 0; +} + +#ifdef CONFIG_DEBUG_FS +#include + +struct intel_display_error_state { + struct intel_cursor_error_state { + u32 control; + u32 position; + u32 base; + u32 size; + } cursor[I915_MAX_PIPES]; + + struct intel_pipe_error_state { + u32 conf; + u32 source; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } pipe[I915_MAX_PIPES]; + + struct intel_plane_error_state { + u32 control; + u32 stride; + u32 size; + u32 pos; + u32 addr; + u32 surface; + u32 tile_offset; + } plane[I915_MAX_PIPES]; +}; + +struct intel_display_error_state * +intel_display_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_display_error_state *error; + enum transcoder cpu_transcoder; + int i; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + for_each_pipe(i) { + cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); + + if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { + error->cursor[i].control = I915_READ(CURCNTR(i)); + error->cursor[i].position = I915_READ(CURPOS(i)); + error->cursor[i].base = I915_READ(CURBASE(i)); + } else { + error->cursor[i].control = I915_READ(CURCNTR_IVB(i)); + error->cursor[i].position = I915_READ(CURPOS_IVB(i)); + error->cursor[i].base = I915_READ(CURBASE_IVB(i)); + } + + error->plane[i].control = I915_READ(DSPCNTR(i)); + error->plane[i].stride = I915_READ(DSPSTRIDE(i)); + if (INTEL_INFO(dev)->gen <= 3) { + error->plane[i].size = I915_READ(DSPSIZE(i)); + error->plane[i].pos = I915_READ(DSPPOS(i)); + } + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + error->plane[i].addr = I915_READ(DSPADDR(i)); + if (INTEL_INFO(dev)->gen >= 4) { + error->plane[i].surface = I915_READ(DSPSURF(i)); + error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); + } + + error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); + error->pipe[i].source = I915_READ(PIPESRC(i)); + error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); + error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); + error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); + error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); + error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); + error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + } + + return error; +} + +void +intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error) +{ + int i; + + seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); + for_each_pipe(i) { + seq_printf(m, "Pipe [%d]:\n", i); + seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); + seq_printf(m, " SRC: %08x\n", error->pipe[i].source); + seq_printf(m, " HTOTAL: %08x\n", 
error->pipe[i].htotal); + seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); + seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); + seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); + seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); + seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); + + seq_printf(m, "Plane [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->plane[i].control); + seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); + if (INTEL_INFO(dev)->gen <= 3) { + seq_printf(m, " SIZE: %08x\n", error->plane[i].size); + seq_printf(m, " POS: %08x\n", error->plane[i].pos); + } + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); + if (INTEL_INFO(dev)->gen >= 4) { + seq_printf(m, " SURF: %08x\n", error->plane[i].surface); + seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); + } + + seq_printf(m, "Cursor [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); + seq_printf(m, " POS: %08x\n", error->cursor[i].position); + seq_printf(m, " BASE: %08x\n", error->cursor[i].base); + } +} +#endif diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c new file mode 100644 index 0000000..80feaec --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -0,0 +1,3010 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Keith Packard + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +#define DP_LINK_CHECK_TIMEOUT (10 * 1000) + +/** + * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. + */ +static bool is_edp(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + + return intel_dig_port->base.type == INTEL_OUTPUT_EDP; +} + +/** + * is_pch_edp - is the port on the PCH and attached to an eDP panel? + * @intel_dp: DP struct + * + * Returns true if the given DP struct corresponds to a PCH DP port attached + * to an eDP panel, false otherwise. Helpful for determining whether we + * may need FDI resources for a given DP output or not. 
+ */ +static bool is_pch_edp(struct intel_dp *intel_dp) +{ + return intel_dp->is_pch_edp; +} + +/** + * is_cpu_edp - is the port on the CPU and attached to an eDP panel? + * @intel_dp: DP struct + * + * Returns true if the given DP struct corresponds to a CPU eDP port. + */ +static bool is_cpu_edp(struct intel_dp *intel_dp) +{ + return is_edp(intel_dp) && !is_pch_edp(intel_dp); +} + +static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + + return intel_dig_port->base.base.dev; +} + +static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +{ + return enc_to_intel_dp(&intel_attached_encoder(connector)->base); +} + +/** + * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? + * @encoder: DRM encoder + * + * Return true if @encoder corresponds to a PCH attached eDP panel. Needed + * by intel_display.c. + */ +bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp; + + if (!encoder) + return false; + + intel_dp = enc_to_intel_dp(encoder); + + return is_pch_edp(intel_dp); +} + +static void intel_dp_link_down(struct intel_dp *intel_dp); + +static int +intel_dp_max_link_bw(struct intel_dp *intel_dp) +{ + int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; + + switch (max_link_bw) { + case DP_LINK_BW_1_62: + case DP_LINK_BW_2_7: + break; + default: + max_link_bw = DP_LINK_BW_1_62; + break; + } + return max_link_bw; +} + +/* + * The units on the numbers in the next two are... bizarre. Examples will + * make it clearer; this one parallels an example in the eDP spec. + * + * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: + * + * 270000 * 1 * 8 / 10 == 216000 + * + * The actual data capacity of that configuration is 2.16Gbit/s, so the + * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - + * or equivalently, kilopixels per second - so for 1680x1050R it'd be + * 119000. At 18bpp that's 2142000 kilobits per second. + * + * Thus the strange-looking division by 10 in intel_dp_link_required, to + * get the result in decakilobits instead of kilobits. 
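+ *
+ * Completing the example with the figures above, for illustration:
+ *
+ *     intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
+ *     intel_dp_max_data_rate(270000, 1)  ==  270000 * 1 * 8 / 10   == 216000
+ *
+ * 214200 <= 216000, so 1680x1050R at 18bpp just fits on a single 2.7GHz lane.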
+ */ + +static int +intel_dp_link_required(int pixel_clock, int bpp) +{ + return (pixel_clock * bpp + 9) / 10; +} + +static int +intel_dp_max_data_rate(int max_link_clock, int max_lanes) +{ + return (max_link_clock * max_lanes * 8) / 10; +} + +static int +intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + int target_clock = mode->clock; + int max_rate, mode_rate, max_lanes, max_link_clock; + + if (is_edp(intel_dp) && fixed_mode) { + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + + target_clock = fixed_mode->clock; + } + + max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); + max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + + max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(target_clock, 18); + + if (mode_rate > max_rate) + return MODE_CLOCK_HIGH; + + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +static uint32_t +pack_aux(uint8_t *src, int src_bytes) +{ + int i; + uint32_t v = 0; + + if (src_bytes > 4) + src_bytes = 4; + for (i = 0; i < src_bytes; i++) + v |= ((uint32_t) src[i]) << ((3-i) * 8); + return v; +} + +static void +unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +{ + int i; + if (dst_bytes > 4) + dst_bytes = 4; + for (i = 0; i < dst_bytes; i++) + dst[i] = src >> ((3-i) * 8); +} + +/* hrawclock is 1/4 the FSB frequency */ +static int +intel_hrawclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t clkcfg; + + /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ + if (IS_VALLEYVIEW(dev)) + return 200; + + clkcfg = I915_READ(CLKCFG); + switch (clkcfg & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_400: + return 100; + case CLKCFG_FSB_533: + return 133; + case CLKCFG_FSB_667: + return 166; + case CLKCFG_FSB_800: + return 200; + case CLKCFG_FSB_1067: + return 266; + case CLKCFG_FSB_1333: + return 333; + /* these two are just a guess; one of them might be right */ + case CLKCFG_FSB_1600: + case CLKCFG_FSB_1600_ALT: + return 400; + default: + return 133; + } +} + +static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_stat_reg; + + pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; + return (I915_READ(pp_stat_reg) & PP_ON) != 0; +} + +static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_ctrl_reg; + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0; +} + +static void +intel_dp_check_edp(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_stat_reg, pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + + pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? 
PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { + WARN(1, "eDP powered off while attempting aux channel communication.\n"); + DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", + I915_READ(pp_stat_reg), + I915_READ(pp_ctrl_reg)); + } +} + +static uint32_t +intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; + uint32_t status; + bool done; + +#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) + if (has_aux_irq) + done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); + else + done = wait_for_atomic(C, 10) == 0; + if (!done) + DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", + has_aux_irq); +#undef C + + return status; +} + +static int +intel_dp_aux_ch(struct intel_dp *intel_dp, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; + uint32_t ch_data = ch_ctl + 4; + int i, ret, recv_bytes; + uint32_t status; + uint32_t aux_clock_divider; + int try, precharge; + bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); + + /* dp aux is extremely sensitive to irq latency, hence request the + * lowest possible wakeup latency and so prevent the cpu from going into + * deep sleep states. + */ + pm_qos_update_request(&dev_priv->pm_qos, 0); + + intel_dp_check_edp(intel_dp); + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. So, take the + * hrawclk value and divide by 2 and use that + * + * Note that PCH attached eDP panels should use a 125MHz input + * clock divider. + */ + if (is_cpu_edp(intel_dp)) { + if (HAS_DDI(dev)) + aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; + else if (IS_VALLEYVIEW(dev)) + aux_clock_divider = 100; + else if (IS_GEN6(dev) || IS_GEN7(dev)) + aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ + else + aux_clock_divider = 225; /* eDP input clock at 450Mhz */ + } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { + /* Workaround for non-ULT HSW */ + aux_clock_divider = 74; + } else if (HAS_PCH_SPLIT(dev)) { + aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + } else { + aux_clock_divider = intel_hrawclk(dev) / 2; + } + + if (IS_GEN6(dev)) + precharge = 3; + else + precharge = 5; + + /* Try to wait for any previous AUX channel activity */ + for (try = 0; try < 3; try++) { + status = I915_READ_NOTRACE(ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + msleep(1); + } + + if (try == 3) { + WARN(1, "dp_aux_ch not started status 0x%08x\n", + I915_READ(ch_ctl)); + ret = -EBUSY; + goto out; + } + + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) + I915_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); + + /* Send the command and wait for it to complete */ + I915_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); + + /* Clear done status and any errors */ + I915_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR)) + continue; + if (status & DP_AUX_CH_CTL_DONE) + break; + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { + DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + ret = -EBUSY; + goto out; + } + + /* Check for timeout or receive error. + * Timeouts occur when the sink is not connected + */ + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + ret = -EIO; + goto out; + } + + /* Timeouts occur when the device isn't connected, so they're + * "normal" -- don't fill the kernel log with these */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { + DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); + ret = -ETIMEDOUT; + goto out; + } + + /* Unload any bytes sent back from the other side */ + recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> + DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + if (recv_bytes > recv_size) + recv_bytes = recv_size; + + for (i = 0; i < recv_bytes; i += 4) + unpack_aux(I915_READ(ch_data + i), + recv + i, recv_bytes - i); + + ret = recv_bytes; +out: + pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); + + return ret; +} + +/* Write data to the aux channel in native mode */ +static int +intel_dp_aux_native_write(struct intel_dp *intel_dp, + uint16_t address, uint8_t *send, int send_bytes) +{ + int ret; + uint8_t msg[20]; + int msg_bytes; + uint8_t ack; + + intel_dp_check_edp(intel_dp); + if (send_bytes > 16) + return -1; + msg[0] = AUX_NATIVE_WRITE << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = send_bytes - 1; + memcpy(&msg[4], send, send_bytes); + msg_bytes = send_bytes + 4; + for (;;) { + ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); + if (ret < 0) + return ret; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) + break; + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } + return send_bytes; +} + +/* Write a single byte to the aux channel in native mode */ +static int +intel_dp_aux_native_write_1(struct intel_dp *intel_dp, + uint16_t address, uint8_t byte) +{ + return intel_dp_aux_native_write(intel_dp, address, &byte, 1); +} + +/* read bytes from a native aux channel */ +static int +intel_dp_aux_native_read(struct intel_dp *intel_dp, + uint16_t address, uint8_t *recv, int recv_bytes) +{ + uint8_t msg[4]; + int msg_bytes; + uint8_t reply[20]; + int reply_bytes; + uint8_t ack; + int ret; + + intel_dp_check_edp(intel_dp); + msg[0] = AUX_NATIVE_READ << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = recv_bytes - 1; + + msg_bytes = 4; + reply_bytes = recv_bytes + 1; + + for (;;) { + ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, + reply, reply_bytes); + if (ret == 0) + return -EPROTO; + if (ret < 0) + return ret; + ack = reply[0]; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { + memcpy(recv, reply + 1, ret - 1); + return ret - 1; + } + else if ((ack & 
AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } +} + +static int +intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + struct intel_dp *intel_dp = container_of(adapter, + struct intel_dp, + adapter); + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + unsigned retry; + int msg_bytes; + int reply_bytes; + int ret; + + intel_dp_check_edp(intel_dp); + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[0] = AUX_I2C_READ << 4; + else + msg[0] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[0] |= AUX_I2C_MOT << 4; + + msg[1] = address >> 8; + msg[2] = address; + + switch (mode) { + case MODE_I2C_WRITE: + msg[3] = 0; + msg[4] = write_byte; + msg_bytes = 5; + reply_bytes = 1; + break; + case MODE_I2C_READ: + msg[3] = 0; + msg_bytes = 4; + reply_bytes = 2; + break; + default: + msg_bytes = 3; + reply_bytes = 1; + break; + } + + for (retry = 0; retry < 5; retry++) { + ret = intel_dp_aux_ch(intel_dp, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { + DRM_DEBUG_KMS("aux_ch failed %d\n", ret); + return ret; + } + + switch (reply[0] & AUX_NATIVE_REPLY_MASK) { + case AUX_NATIVE_REPLY_ACK: + /* I2C-over-AUX Reply field is only valid + * when paired with AUX ACK. + */ + break; + case AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("aux_ch native nack\n"); + return -EREMOTEIO; + case AUX_NATIVE_REPLY_DEFER: + /* + * For now, just give more slack to branch devices. We + * could check the DPCD for I2C bit rate capabilities, + * and if available, adjust the interval. We could also + * be more careful with DP-to-Legacy adapters where a + * long legacy cable may force very low I2C bit rates. 
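+ * For now the retry below simply sleeps a little longer (roughly 500us
+ * instead of 300us) when the DPCD reports a downstream (branch) port.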
+ */ + if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT) + usleep_range(500, 600); + else + usleep_range(300, 400); + continue; + default: + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", + reply[0]); + return -EREMOTEIO; + } + + switch (reply[0] & AUX_I2C_REPLY_MASK) { + case AUX_I2C_REPLY_ACK: + if (mode == MODE_I2C_READ) { + *read_byte = reply[1]; + } + return reply_bytes - 1; + case AUX_I2C_REPLY_NACK: + DRM_DEBUG_KMS("aux_i2c nack\n"); + return -EREMOTEIO; + case AUX_I2C_REPLY_DEFER: + DRM_DEBUG_KMS("aux_i2c defer\n"); + udelay(100); + break; + default: + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); + return -EREMOTEIO; + } + } + + DRM_ERROR("too many retries, giving up\n"); + return -EREMOTEIO; +} + +static int +intel_dp_i2c_init(struct intel_dp *intel_dp, + struct intel_connector *intel_connector, const char *name) +{ + int ret; + + DRM_DEBUG_KMS("i2c_init %s\n", name); + intel_dp->algo.running = false; + intel_dp->algo.address = 0; + intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; + + memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); + intel_dp->adapter.owner = THIS_MODULE; + intel_dp->adapter.class = I2C_CLASS_DDC; + strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); + intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; + intel_dp->adapter.algo_data = &intel_dp->algo; + intel_dp->adapter.dev.parent = &intel_connector->base.kdev; + + ironlake_edp_panel_vdd_on(intel_dp); + ret = i2c_dp_aux_add_bus(&intel_dp->adapter); + ironlake_edp_panel_vdd_off(intel_dp, false); + return ret; +} + +bool +intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct drm_display_mode *mode = &pipe_config->requested_mode; + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_connector *intel_connector = intel_dp->attached_connector; + int lane_count, clock; + int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; + int bpp, mode_rate; + static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + int target_clock, link_avail, link_clock; + + if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp)) + pipe_config->has_pch_encoder = true; + + pipe_config->has_dp_encoder = true; + + if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { + intel_fixed_panel_mode(intel_connector->panel.fixed_mode, + adjusted_mode); + intel_pch_panel_fitting(dev, + intel_connector->panel.fitting_mode, + mode, adjusted_mode); + } + /* We need to take the panel's fixed mode into account. */ + target_clock = adjusted_mode->clock; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + return false; + + DRM_DEBUG_KMS("DP link computation with max lane count %i " + "max bw %02x pixel clock %iKHz\n", + max_lane_count, bws[max_clock], adjusted_mode->clock); + + /* Walk through all bpp values. Luckily they're all nicely spaced with 2 + * bpc in between. 
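+ * Concretely, the loop below starts at min(8 bpc, the pipe's bpp), steps down
+ * in 2 bpc (6 bpp) increments to 6 bpc, and at each step tries every link
+ * rate and lane count, taking the first combination with enough bandwidth.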
*/ + bpp = min_t(int, 8*3, pipe_config->pipe_bpp); + if (is_edp(intel_dp) && dev_priv->edp.bpp) + bpp = min_t(int, bpp, dev_priv->edp.bpp); + + for (; bpp >= 6*3; bpp -= 2*3) { + mode_rate = intel_dp_link_required(target_clock, bpp); + + for (clock = 0; clock <= max_clock; clock++) { + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); + link_avail = intel_dp_max_data_rate(link_clock, + lane_count); + + if (mode_rate <= link_avail) { + goto found; + } + } + } + } + + return false; + +found: + if (intel_dp->color_range_auto) { + /* + * See: + * CEA-861-E - 5.1 Default Encoding Parameters + * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry + */ + if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) + intel_dp->color_range = DP_COLOR_RANGE_16_235; + else + intel_dp->color_range = 0; + } + + if (intel_dp->color_range) + pipe_config->limited_color_range = true; + + intel_dp->link_bw = bws[clock]; + intel_dp->lane_count = lane_count; + adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->pipe_bpp = bpp; + pipe_config->pixel_target_clock = target_clock; + + DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock, bpp); + DRM_DEBUG_KMS("DP link bw required %i available %i\n", + mode_rate, link_avail); + + intel_link_compute_m_n(bpp, lane_count, + target_clock, adjusted_mode->clock, + &pipe_config->dp_m_n); + + return true; +} + +void intel_dp_init_link_config(struct intel_dp *intel_dp) +{ + memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + intel_dp->link_configuration[0] = intel_dp->link_bw; + intel_dp->link_configuration[1] = intel_dp->lane_count; + intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; + /* + * Check for DPCD version > 1.1 and enhanced framing support + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { + intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + } +} + +static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_FREQ_MASK; + + if (clock < 200000) { + /* For a long time we've carried around a ILK-DevA w/a for the + * 160MHz clock. If we're really unlucky, it's still required. + */ + DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); + dpa_ctl |= DP_PLL_FREQ_160MHZ; + } else { + dpa_ctl |= DP_PLL_FREQ_270MHZ; + } + + I915_WRITE(DP_A, dpa_ctl); + + POSTING_READ(DP_A); + udelay(500); +} + +static void +intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* + * There are four kinds of DP registers: + * + * IBX PCH + * SNB CPU + * IVB CPU + * CPT PCH + * + * IBX PCH and CPU are the same for almost everything, + * except that the CPU DP PLL is configured in this + * register + * + * CPT PCH is quite different, having many bits moved + * to the TRANS_DP_CTL register instead. 
That + * configuration happens (oddly) in ironlake_pch_enable + */ + + /* Preserve the BIOS-computed detected bit. This is + * supposed to be read-only. + */ + intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + + /* Handle DP bits in common between all three register formats */ + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; + + switch (intel_dp->lane_count) { + case 1: + intel_dp->DP |= DP_PORT_WIDTH_1; + break; + case 2: + intel_dp->DP |= DP_PORT_WIDTH_2; + break; + case 4: + intel_dp->DP |= DP_PORT_WIDTH_4; + break; + } + if (intel_dp->has_audio) { + DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", + pipe_name(intel_crtc->pipe)); + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; + intel_write_eld(encoder, adjusted_mode); + } + + intel_dp_init_link_config(intel_dp); + + /* Split out the IBX/CPU vs CPT settings */ + + if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; + + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + intel_dp->DP |= intel_crtc->pipe << 29; + + /* don't miss out required setting for eDP */ + if (adjusted_mode->clock < 200000) + intel_dp->DP |= DP_PLL_FREQ_160MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { + if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) + intel_dp->DP |= intel_dp->color_range; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF; + + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + if (intel_crtc->pipe == 1) + intel_dp->DP |= DP_PIPEB_SELECT; + + if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + /* don't miss out required setting for eDP */ + if (adjusted_mode->clock < 200000) + intel_dp->DP |= DP_PLL_FREQ_160MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + } + } else { + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; + } + + if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) + ironlake_set_pll_edp(crtc, adjusted_mode->clock); +} + +#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) + +#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) +#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +static void ironlake_wait_panel_status(struct intel_dp *intel_dp, + u32 mask, + u32 value) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_stat_reg, pp_ctrl_reg; + + pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? 
PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", + mask, value, + I915_READ(pp_stat_reg), + I915_READ(pp_ctrl_reg)); + + if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { + DRM_ERROR("Panel status timeout: status %08x control %08x\n", + I915_READ(pp_stat_reg), + I915_READ(pp_ctrl_reg)); + } +} + +static void ironlake_wait_panel_on(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power on\n"); + ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); +} + +static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power off time\n"); + ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); +} + +static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); +} + + +/* Read the current pp_control value, unlocking the register if it + * is locked + */ + +static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 control; + u32 pp_ctrl_reg; + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + control = I915_READ(pp_ctrl_reg); + + control &= ~PANEL_UNLOCK_MASK; + control |= PANEL_UNLOCK_REGS; + return control; +} + +void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + u32 pp_stat_reg, pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + DRM_DEBUG_KMS("Turn eDP VDD on\n"); + + WARN(intel_dp->want_panel_vdd, + "eDP VDD already requested on\n"); + + intel_dp->want_panel_vdd = true; + + if (ironlake_edp_have_panel_vdd(intel_dp)) { + DRM_DEBUG_KMS("eDP VDD already on\n"); + return; + } + + if (!ironlake_edp_have_panel_power(intel_dp)) + ironlake_wait_panel_power_cycle(intel_dp); + + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_FORCE_VDD; + + pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + /* + * If the panel wasn't on, delay before accessing aux channel + */ + if (!ironlake_edp_have_panel_power(intel_dp)) { + DRM_DEBUG_KMS("eDP was not running\n"); + msleep(intel_dp->panel_power_up_delay); + } +} + +static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + u32 pp_stat_reg, pp_ctrl_reg; + + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_FORCE_VDD; + + pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? 
PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + /* Make sure sequencer is idle before allowing subsequent activity */ + DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + msleep(intel_dp->panel_power_down_delay); + } +} + +static void ironlake_panel_vdd_work(struct work_struct *__work) +{ + struct intel_dp *intel_dp = container_of(to_delayed_work(__work), + struct intel_dp, panel_vdd_work); + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + mutex_lock(&dev->mode_config.mutex); + ironlake_panel_vdd_off_sync(intel_dp); + mutex_unlock(&dev->mode_config.mutex); +} + +void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) +{ + if (!is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); + WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); + + intel_dp->want_panel_vdd = false; + + if (sync) { + ironlake_panel_vdd_off_sync(intel_dp); + } else { + /* + * Queue the timer to fire a long + * time from now (relative to the power down delay) + * to keep the panel power up across a sequence of operations + */ + schedule_delayed_work(&intel_dp->panel_vdd_work, + msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); + } +} + +void ironlake_edp_panel_on(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + u32 pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("Turn eDP power on\n"); + + if (ironlake_edp_have_panel_power(intel_dp)) { + DRM_DEBUG_KMS("eDP power already on\n"); + return; + } + + ironlake_wait_panel_power_cycle(intel_dp); + + pp = ironlake_get_pp_control(intel_dp); + if (IS_GEN5(dev)) { + /* ILK workaround: disable reset around power sequence */ + pp &= ~PANEL_POWER_RESET; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); + } + + pp |= POWER_TARGET_ON; + if (!IS_GEN5(dev)) + pp |= PANEL_POWER_RESET; + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + ironlake_wait_panel_on(intel_dp); + + if (IS_GEN5(dev)) { + pp |= PANEL_POWER_RESET; /* restore panel reset bit */ + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); + } +} + +void ironlake_edp_panel_off(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + u32 pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("Turn eDP power off\n"); + + WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); + + pp = ironlake_get_pp_control(intel_dp); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. */ + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? 
PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + intel_dp->want_panel_vdd = false; + + ironlake_wait_panel_off(intel_dp); +} + +void ironlake_edp_backlight_on(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; + u32 pp; + u32 pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("\n"); + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. + */ + msleep(intel_dp->backlight_on_delay); + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_BLC_ENABLE; + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + intel_panel_enable_backlight(dev, pipe); +} + +void ironlake_edp_backlight_off(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + u32 pp_ctrl_reg; + + if (!is_edp(intel_dp)) + return; + + intel_panel_disable_backlight(dev); + + DRM_DEBUG_KMS("\n"); + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_BLC_ENABLE; + + pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + msleep(intel_dp->backlight_off_delay); +} + +static void ironlake_edp_pll_on(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + assert_pipe_disabled(dev_priv, + to_intel_crtc(crtc)->pipe); + + DRM_DEBUG_KMS("\n"); + dpa_ctl = I915_READ(DP_A); + WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); + WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); + + /* We don't adjust intel_dp->DP while tearing down the link, to + * facilitate link retraining (e.g. after hotplug). Hence clear all + * enable bits here to ensure that we don't enable too much. */ + intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); + intel_dp->DP |= DP_PLL_ENABLE; + I915_WRITE(DP_A, intel_dp->DP); + POSTING_READ(DP_A); + udelay(200); +} + +static void ironlake_edp_pll_off(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + assert_pipe_disabled(dev_priv, + to_intel_crtc(crtc)->pipe); + + dpa_ctl = I915_READ(DP_A); + WARN((dpa_ctl & DP_PLL_ENABLE) == 0, + "dp pll off, should be on\n"); + WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); + + /* We can't rely on the value tracked for the DP register in + * intel_dp->DP because link_down must not change that (otherwise link + * re-training will fail. 
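+ * Instead, DP_PLL_ENABLE is cleared directly in the value just read back
+ * from DP_A.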
*/ + dpa_ctl &= ~DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); + udelay(200); +} + +/* If the sink supports it, try to set the power state appropriately */ +void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) +{ + int ret, i; + + /* Should have a valid DPCD by this point */ + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (mode != DRM_MODE_DPMS_ON) { + ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, + DP_SET_POWER_D3); + if (ret != 1) + DRM_DEBUG_DRIVER("failed to write sink power state\n"); + } else { + /* + * When turning on, we need to retry for 1ms to give the sink + * time to wake up. + */ + for (i = 0; i < 3; i++) { + ret = intel_dp_aux_native_write_1(intel_dp, + DP_SET_POWER, + DP_SET_POWER_D0); + if (ret == 1) + break; + msleep(1); + } + } +} + +static bool intel_dp_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ(intel_dp->output_reg); + + if (!(tmp & DP_PORT_EN)) + return false; + + if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { + *pipe = PORT_TO_PIPE_CPT(tmp); + } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { + *pipe = PORT_TO_PIPE(tmp); + } else { + u32 trans_sel; + u32 trans_dp; + int i; + + switch (intel_dp->output_reg) { + case PCH_DP_B: + trans_sel = TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + trans_sel = TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + trans_sel = TRANS_DP_PORT_SEL_D; + break; + default: + return true; + } + + for_each_pipe(i) { + trans_dp = I915_READ(TRANS_DP_CTL(i)); + if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { + *pipe = i; + return true; + } + } + + DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", + intel_dp->output_reg); + } + + return true; +} + +static void intel_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + /* Make sure the panel is off before trying to change the mode. But also + * ensure that we have vdd while we switch off the panel. */ + ironlake_edp_panel_vdd_on(intel_dp); + ironlake_edp_backlight_off(intel_dp); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + ironlake_edp_panel_off(intel_dp); + + /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
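+ * Here the link is only torn down for PCH/other DP ports; for CPU eDP that
+ * is deferred to intel_post_disable_dp() below.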
*/ + if (!is_cpu_edp(intel_dp)) + intel_dp_link_down(intel_dp); +} + +static void intel_post_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + + if (is_cpu_edp(intel_dp)) { + intel_dp_link_down(intel_dp); + if (!IS_VALLEYVIEW(dev)) + ironlake_edp_pll_off(intel_dp); + } +} + +static void intel_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(intel_dp->output_reg); + + if (WARN_ON(dp_reg & DP_PORT_EN)) + return; + + ironlake_edp_panel_vdd_on(intel_dp); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_start_link_train(intel_dp); + ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp, true); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + ironlake_edp_backlight_on(intel_dp); +} + +static void intel_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + + if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) + ironlake_edp_pll_on(intel_dp); +} + +/* + * Native read with retry for link status and receiver capability reads for + * cases where the sink may still be asleep. + */ +static bool +intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, + uint8_t *recv, int recv_bytes) +{ + int ret, i; + + /* + * Sinks are *supposed* to come up within 1ms from an off state, + * but we're also supposed to retry 3 times per the spec. + */ + for (i = 0; i < 3; i++) { + ret = intel_dp_aux_native_read(intel_dp, address, recv, + recv_bytes); + if (ret == recv_bytes) + return true; + msleep(1); + } + + return false; +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +static bool +intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + return intel_dp_aux_native_read_retry(intel_dp, + DP_LANE0_1_STATUS, + link_status, + DP_LINK_STATUS_SIZE); +} + +#if 0 +static char *voltage_names[] = { + "0.4V", "0.6V", "0.8V", "1.2V" +}; +static char *pre_emph_names[] = { + "0dB", "3.5dB", "6dB", "9.5dB" +}; +static char *link_train_names[] = { + "pattern 1", "pattern 2", "idle", "off" +}; +#endif + +/* + * These are source-specific values; current Intel hardware supports + * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB + */ + +static uint8_t +intel_dp_voltage_max(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) + return DP_TRAIN_VOLTAGE_SWING_800; + else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) + return DP_TRAIN_VOLTAGE_SWING_1200; + else + return DP_TRAIN_VOLTAGE_SWING_800; +} + +static uint8_t +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (HAS_DDI(dev)) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_9_5; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } + } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && 
!IS_VALLEYVIEW(dev)) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } + } else { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } + } +} + +static void +intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + uint8_t v = 0; + uint8_t p = 0; + int lane; + uint8_t voltage_max; + uint8_t preemph_max; + + for (lane = 0; lane < intel_dp->lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + voltage_max = intel_dp_voltage_max(intel_dp); + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + intel_dp->train_set[lane] = v | p; +} + +static uint32_t +intel_gen4_signal_levels(uint8_t train_set) +{ + uint32_t signal_levels = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + default: + signal_levels |= DP_VOLTAGE_0_4; + break; + case DP_TRAIN_VOLTAGE_SWING_600: + signal_levels |= DP_VOLTAGE_0_6; + break; + case DP_TRAIN_VOLTAGE_SWING_800: + signal_levels |= DP_VOLTAGE_0_8; + break; + case DP_TRAIN_VOLTAGE_SWING_1200: + signal_levels |= DP_VOLTAGE_1_2; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPHASIS_0: + default: + signal_levels |= DP_PRE_EMPHASIS_0; + break; + case DP_TRAIN_PRE_EMPHASIS_3_5: + signal_levels |= DP_PRE_EMPHASIS_3_5; + break; + case DP_TRAIN_PRE_EMPHASIS_6: + signal_levels |= DP_PRE_EMPHASIS_6; + break; + case DP_TRAIN_PRE_EMPHASIS_9_5: + signal_levels |= DP_PRE_EMPHASIS_9_5; + break; + } + return signal_levels; +} + +/* Gen6's DP voltage swing and pre-emphasis control */ +static uint32_t +intel_gen6_edp_signal_levels(uint8_t train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: + return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: + case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return 
EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; + } +} + +/* Gen7's DP voltage swing and pre-emphasis control */ +static uint32_t +intel_gen7_edp_signal_levels(uint8_t train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_400MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_400MV_3_5DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: + return EDP_LINK_TRAIN_400MV_6DB_IVB; + + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_600MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_600MV_3_5DB_IVB; + + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: + return EDP_LINK_TRAIN_800MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: + return EDP_LINK_TRAIN_800MV_3_5DB_IVB; + + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return EDP_LINK_TRAIN_500MV_0DB_IVB; + } +} + +/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ +static uint32_t +intel_hsw_signal_levels(uint8_t train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_400MV_0DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_400MV_3_5DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: + return DDI_BUF_EMP_400MV_6DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: + return DDI_BUF_EMP_400MV_9_5DB_HSW; + + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_600MV_0DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_600MV_3_5DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: + return DDI_BUF_EMP_600MV_6DB_HSW; + + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: + return DDI_BUF_EMP_800MV_0DB_HSW; + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: + return DDI_BUF_EMP_800MV_3_5DB_HSW; + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return DDI_BUF_EMP_400MV_0DB_HSW; + } +} + +/* Properly updates "DP" with the correct signal levels. 
*/ +static void +intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + uint32_t signal_levels, mask; + uint8_t train_set = intel_dp->train_set[0]; + + if (HAS_DDI(dev)) { + signal_levels = intel_hsw_signal_levels(train_set); + mask = DDI_BUF_EMP_MASK; + } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + signal_levels = intel_gen7_edp_signal_levels(train_set); + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; + } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { + signal_levels = intel_gen6_edp_signal_levels(train_set); + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; + } else { + signal_levels = intel_gen4_signal_levels(train_set); + mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; + } + + DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); + + *DP = (*DP & ~mask) | signal_levels; +} + +static bool +intel_dp_set_link_train(struct intel_dp *intel_dp, + uint32_t dp_reg_value, + uint8_t dp_train_pat) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + + if (HAS_DDI(dev)) { + uint32_t temp = I915_READ(DP_TP_CTL(port)); + + if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) + temp |= DP_TP_CTL_SCRAMBLE_DISABLE; + else + temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; + + temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; + + break; + case DP_TRAINING_PATTERN_1: + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + break; + case DP_TRAINING_PATTERN_2: + temp |= DP_TP_CTL_LINK_TRAIN_PAT2; + break; + case DP_TRAINING_PATTERN_3: + temp |= DP_TP_CTL_LINK_TRAIN_PAT3; + break; + } + I915_WRITE(DP_TP_CTL(port), temp); + + } else if (HAS_PCH_CPT(dev) && + (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { + dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; + break; + case DP_TRAINING_PATTERN_1: + dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; + break; + case DP_TRAINING_PATTERN_2: + dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; + break; + case DP_TRAINING_PATTERN_3: + DRM_ERROR("DP training pattern 3 not supported\n"); + dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; + break; + } + + } else { + dp_reg_value &= ~DP_LINK_TRAIN_MASK; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + dp_reg_value |= DP_LINK_TRAIN_OFF; + break; + case DP_TRAINING_PATTERN_1: + dp_reg_value |= DP_LINK_TRAIN_PAT_1; + break; + case DP_TRAINING_PATTERN_2: + dp_reg_value |= DP_LINK_TRAIN_PAT_2; + break; + case DP_TRAINING_PATTERN_3: + DRM_ERROR("DP training pattern 3 not supported\n"); + dp_reg_value |= DP_LINK_TRAIN_PAT_2; + break; + } + } + + I915_WRITE(intel_dp->output_reg, dp_reg_value); + POSTING_READ(intel_dp->output_reg); + + intel_dp_aux_native_write_1(intel_dp, + DP_TRAINING_PATTERN_SET, + dp_train_pat); + + if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != + DP_TRAINING_PATTERN_DISABLE) { + ret = intel_dp_aux_native_write(intel_dp, + DP_TRAINING_LANE0_SET, + intel_dp->train_set, + intel_dp->lane_count); + if (ret != intel_dp->lane_count) + return false; + } + + return true; +} + +static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) +{ + 
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + uint32_t val; + + if (!HAS_DDI(dev)) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_LINK_TRAIN_MASK; + val |= DP_TP_CTL_LINK_TRAIN_IDLE; + I915_WRITE(DP_TP_CTL(port), val); + + /* + * On PORT_A we can have only eDP in SST mode. There the only reason + * we need to set idle transmission mode is to work around a HW issue + * where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. + */ + if (port == PORT_A) + return; + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), + 1)) + DRM_ERROR("Timed out waiting for DP idle patterns\n"); +} + +/* Enable corresponding port and start training pattern 1 */ +void +intel_dp_start_link_train(struct intel_dp *intel_dp) +{ + struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; + struct drm_device *dev = encoder->dev; + int i; + uint8_t voltage; + bool clock_recovery = false; + int voltage_tries, loop_tries; + uint32_t DP = intel_dp->DP; + + if (HAS_DDI(dev)) + intel_ddi_prepare_link_retrain(encoder); + + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); + + DP |= DP_PORT_EN; + + memset(intel_dp->train_set, 0, 4); + voltage = 0xff; + voltage_tries = 0; + loop_tries = 0; + clock_recovery = false; + for (;;) { + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + intel_dp_set_signal_levels(intel_dp, &DP); + + /* Set training pattern 1 */ + if (!intel_dp_set_link_train(intel_dp, DP, + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE)) + break; + + drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); + break; + } + + if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("clock recovery OK\n"); + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) { + ++loop_tries; + if (loop_tries == 5) { + DRM_DEBUG_KMS("too many full retries, give up\n"); + break; + } + memset(intel_dp->train_set, 0, 4); + voltage_tries = 0; + continue; + } + + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++voltage_tries; + if (voltage_tries == 5) { + DRM_DEBUG_KMS("too many voltage retries, give up\n"); + break; + } + } else + voltage_tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + } + + intel_dp->DP = DP; +} + +void +intel_dp_complete_link_train(struct intel_dp *intel_dp) +{ + bool channel_eq = false; + int tries, cr_tries; + uint32_t DP = intel_dp->DP; + + /* channel equalization */ + tries = 0; + cr_tries = 0; + channel_eq = false; + for (;;) { + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + if (cr_tries > 5) { + DRM_ERROR("failed to train DP, aborting\n"); + 
intel_dp_link_down(intel_dp); + break; + } + + intel_dp_set_signal_levels(intel_dp, &DP); + + /* channel eq pattern */ + if (!intel_dp_set_link_train(intel_dp, DP, + DP_TRAINING_PATTERN_2 | + DP_LINK_SCRAMBLING_DISABLE)) + break; + + drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + if (!intel_dp_get_link_status(intel_dp, link_status)) + break; + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + intel_dp_start_link_train(intel_dp); + cr_tries++; + continue; + } + + if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times, then try clock recovery if that fails */ + if (tries > 5) { + intel_dp_link_down(intel_dp); + intel_dp_start_link_train(intel_dp); + tries = 0; + cr_tries++; + continue; + } + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + ++tries; + } + + intel_dp_set_idle_link_train(intel_dp); + + intel_dp->DP = DP; + + if (channel_eq) + DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); + +} + +void intel_dp_stop_link_train(struct intel_dp *intel_dp) +{ + intel_dp_set_link_train(intel_dp, intel_dp->DP, + DP_TRAINING_PATTERN_DISABLE); +} + +static void +intel_dp_link_down(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(intel_dig_port->base.base.crtc); + uint32_t DP = intel_dp->DP; + + /* + * DDI code has a strict mode set sequence and we should try to respect + * it, otherwise we might hang the machine in many different ways. So we + * really should be disabling the port only on a complete crtc_disable + * sequence. This function is just called under two conditions on DDI + * code: + * - Link train failed while doing crtc_enable, and on this case we + * really should respect the mode set sequence and wait for a + * crtc_disable. + * - Someone turned the monitor off and intel_dp_check_link_status + * called us. We don't need to disable the whole port on this case, so + * when someone turns the monitor on again, + * intel_ddi_prepare_link_retrain will take care of redoing the link + * train. + */ + if (HAS_DDI(dev)) + return; + + if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) + return; + + DRM_DEBUG_KMS("\n"); + + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { + DP &= ~DP_LINK_TRAIN_MASK_CPT; + I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); + } else { + DP &= ~DP_LINK_TRAIN_MASK; + I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + } + POSTING_READ(intel_dp->output_reg); + + /* We don't really know why we're doing this */ + intel_wait_for_vblank(dev, intel_crtc->pipe); + + if (HAS_PCH_IBX(dev) && + I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { + struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + + /* Hardware workaround: leaving our transcoder select + * set to transcoder B while it's off will prevent the + * corresponding HDMI output on transcoder A. + * + * Combine this with another hardware workaround: + * transcoder select bit can only be cleared while the + * port is enabled. + */ + DP &= ~DP_PIPEB_SELECT; + I915_WRITE(intel_dp->output_reg, DP); + + /* Changes to enable or select take place the vblank + * after being written. 
+ */ + if (WARN_ON(crtc == NULL)) { + /* We should never try to disable a port without a crtc + * attached. For paranoia keep the code around for a + * bit. */ + POSTING_READ(intel_dp->output_reg); + msleep(50); + } else + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + + DP &= ~DP_AUDIO_OUTPUT_ENABLE; + I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(intel_dp->output_reg); + msleep(intel_dp->panel_power_down_delay); +} + +static bool +intel_dp_get_dpcd(struct intel_dp *intel_dp) +{ + char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; + + if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, + sizeof(intel_dp->dpcd)) == 0) + return false; /* aux transfer failed */ + + hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), + 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); + DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); + + if (intel_dp->dpcd[DP_DPCD_REV] == 0) + return false; /* DPCD not present */ + + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT)) + return true; /* native DP sink */ + + if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) + return true; /* no per-port downstream info */ + + if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, + intel_dp->downstream_ports, + DP_MAX_DOWNSTREAM_PORTS) == 0) + return false; /* downstream port status fetch failed */ + + return true; +} + +static void +intel_dp_probe_oui(struct intel_dp *intel_dp) +{ + u8 buf[3]; + + if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) + return; + + ironlake_edp_panel_vdd_on(intel_dp); + + if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) + DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", + buf[0], buf[1], buf[2]); + + if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) + DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", + buf[0], buf[1], buf[2]); + + ironlake_edp_panel_vdd_off(intel_dp, false); +} + +static bool +intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + int ret; + + ret = intel_dp_aux_native_read_retry(intel_dp, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector, 1); + if (!ret) + return false; + + return true; +} + +static void +intel_dp_handle_test_request(struct intel_dp *intel_dp) +{ + /* NAK by default */ + intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); +} + +/* + * According to DP spec + * 5.1.2: + * 1. Read DPCD + * 2. Configure link according to Receiver Capabilities + * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 + * 4. 
Check link status on receipt of hot-plug interrupt + */ + +void +intel_dp_check_link_status(struct intel_dp *intel_dp) +{ + struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + u8 sink_irq_vector; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!intel_encoder->connectors_active) + return; + + if (WARN_ON(!intel_encoder->base.crtc)) + return; + + /* Try to read receiver status if the link appears to be up */ + if (!intel_dp_get_link_status(intel_dp, link_status)) { + intel_dp_link_down(intel_dp); + return; + } + + /* Now read the DPCD to see if it's actually running */ + if (!intel_dp_get_dpcd(intel_dp)) { + intel_dp_link_down(intel_dp); + return; + } + + /* Try to read the source of the interrupt */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { + /* Clear interrupt source */ + intel_dp_aux_native_write_1(intel_dp, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + intel_dp_handle_test_request(intel_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + + if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", + drm_get_encoder_name(&intel_encoder->base)); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } +} + +/* XXX this is probably wrong for multiple downstream ports */ +static enum drm_connector_status +intel_dp_detect_dpcd(struct intel_dp *intel_dp) +{ + uint8_t *dpcd = intel_dp->dpcd; + bool hpd; + uint8_t type; + + if (!intel_dp_get_dpcd(intel_dp)) + return connector_status_disconnected; + + /* if there's no downstream port, we're done */ + if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) + return connector_status_connected; + + /* If we're HPD-aware, SINK_COUNT changes dynamically */ + hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); + if (hpd) { + uint8_t reg; + if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, + ®, 1)) + return connector_status_unknown; + return DP_GET_SINK_COUNT(reg) ? connector_status_connected + : connector_status_disconnected; + } + + /* If no HPD, poke DDC gently */ + if (drm_probe_ddc(&intel_dp->adapter)) + return connector_status_connected; + + /* Well we tried, say unknown for unreliable port types */ + type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; + if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) + return connector_status_unknown; + + /* Anything else is out of spec, warn and ignore */ + DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); + return connector_status_disconnected; +} + +static enum drm_connector_status +ironlake_dp_detect(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum drm_connector_status status; + + /* Can't disconnect eDP, but you can close the lid... 
*/ + if (is_edp(intel_dp)) { + status = intel_panel_detect(dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; + } + + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) + return connector_status_disconnected; + + return intel_dp_detect_dpcd(intel_dp); +} + +static enum drm_connector_status +g4x_dp_detect(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + uint32_t bit; + + /* Can't disconnect eDP, but you can close the lid... */ + if (is_edp(intel_dp)) { + enum drm_connector_status status; + + status = intel_panel_detect(dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; + } + + switch (intel_dig_port->port) { + case PORT_B: + bit = PORTB_HOTPLUG_LIVE_STATUS; + break; + case PORT_C: + bit = PORTC_HOTPLUG_LIVE_STATUS; + break; + case PORT_D: + bit = PORTD_HOTPLUG_LIVE_STATUS; + break; + default: + return connector_status_unknown; + } + + if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) + return connector_status_disconnected; + + return intel_dp_detect_dpcd(intel_dp); +} + +static struct edid * +intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + /* use cached edid if we have one */ + if (intel_connector->edid) { + struct edid *edid; + int size; + + /* invalid edid */ + if (IS_ERR(intel_connector->edid)) + return NULL; + + size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; + edid = kmalloc(size, GFP_KERNEL); + if (!edid) + return NULL; + + memcpy(edid, intel_connector->edid, size); + return edid; + } + + return drm_get_edid(connector, adapter); +} + +static int +intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + /* use cached edid if we have one */ + if (intel_connector->edid) { + /* invalid edid */ + if (IS_ERR(intel_connector->edid)) + return 0; + + return intel_connector_update_modes(connector, + intel_connector->edid); + } + + return intel_ddc_get_modes(connector, adapter); +} + +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = connector->dev; + enum drm_connector_status status; + struct edid *edid = NULL; + + intel_dp->has_audio = false; + + if (HAS_PCH_SPLIT(dev)) + status = ironlake_dp_detect(intel_dp); + else + status = g4x_dp_detect(intel_dp); + + if (status != connector_status_connected) + return status; + + intel_dp_probe_oui(intel_dp); + + if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { + intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); + } else { + edid = intel_dp_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + } + } + + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + return connector_status_connected; +} + +static int intel_dp_get_modes(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_connector 
*intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; + int ret; + + /* We should parse the EDID data and find out if it has an audio sink + */ + + ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); + if (ret) + return ret; + + /* if eDP has no EDID, fall back to fixed mode */ + if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, + intel_connector->panel.fixed_mode); + if (mode) { + drm_mode_probed_add(connector, mode); + return 1; + } + } + return 0; +} + +static bool +intel_dp_detect_audio(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct edid *edid; + bool has_audio = false; + + edid = intel_dp_get_edid(connector, &intel_dp->adapter); + if (edid) { + has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + } + + return has_audio; +} + +static int +intel_dp_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_encoder *intel_encoder = intel_attached_encoder(connector); + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + int ret; + + ret = drm_object_property_set_value(&connector->base, property, val); + if (ret) + return ret; + + if (property == dev_priv->force_audio_property) { + int i = val; + bool has_audio; + + if (i == intel_dp->force_audio) + return 0; + + intel_dp->force_audio = i; + + if (i == HDMI_AUDIO_AUTO) + has_audio = intel_dp_detect_audio(connector); + else + has_audio = (i == HDMI_AUDIO_ON); + + if (has_audio == intel_dp->has_audio) + return 0; + + intel_dp->has_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + bool old_auto = intel_dp->color_range_auto; + uint32_t old_range = intel_dp->color_range; + + switch (val) { + case INTEL_BROADCAST_RGB_AUTO: + intel_dp->color_range_auto = true; + break; + case INTEL_BROADCAST_RGB_FULL: + intel_dp->color_range_auto = false; + intel_dp->color_range = 0; + break; + case INTEL_BROADCAST_RGB_LIMITED: + intel_dp->color_range_auto = false; + intel_dp->color_range = DP_COLOR_RANGE_16_235; + break; + default: + return -EINVAL; + } + + if (old_auto == intel_dp->color_range_auto && + old_range == intel_dp->color_range) + return 0; + + goto done; + } + + if (is_edp(intel_dp) && + property == connector->dev->mode_config.scaling_mode_property) { + if (val == DRM_MODE_SCALE_NONE) { + DRM_DEBUG_KMS("no scaling not supported\n"); + return -EINVAL; + } + + if (intel_connector->panel.fitting_mode == val) { + /* the eDP scaling property is not changed */ + return 0; + } + intel_connector->panel.fitting_mode = val; + + goto done; + } + + return -EINVAL; + +done: + if (intel_encoder->base.crtc) + intel_crtc_restore_mode(intel_encoder->base.crtc); + + return 0; +} + +static void +intel_dp_destroy(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (!IS_ERR_OR_NULL(intel_connector->edid)) + kfree(intel_connector->edid); + + if (is_edp(intel_dp)) + intel_panel_fini(&intel_connector->panel); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +void intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_digital_port 
*intel_dig_port = enc_to_dig_port(encoder); + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + i2c_del_adapter(&intel_dp->adapter); + drm_encoder_cleanup(encoder); + if (is_edp(intel_dp)) { + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + mutex_lock(&dev->mode_config.mutex); + ironlake_panel_vdd_off_sync(intel_dp); + mutex_unlock(&dev->mode_config.mutex); + } + kfree(intel_dig_port); +} + +static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + .mode_set = intel_dp_mode_set, +}; + +static const struct drm_connector_funcs intel_dp_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_set_property, + .destroy = intel_dp_destroy, +}; + +static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .destroy = intel_dp_encoder_destroy, +}; + +static void +intel_dp_hot_plug(struct intel_encoder *intel_encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + + intel_dp_check_link_status(intel_dp); +} + +/* Return which DP Port should be selected for Transcoder DP control */ +int +intel_trans_dp_port_sel(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_encoder *intel_encoder; + struct intel_dp *intel_dp; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + intel_dp = enc_to_intel_dp(&intel_encoder->base); + + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_EDP) + return intel_dp->output_reg; + } + + return -1; +} + +/* check the VBT to see whether the eDP is on DP-D port */ +bool intel_dpd_is_edp(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct child_device_config *p_child; + int i; + + if (!dev_priv->child_dev_num) + return false; + + for (i = 0; i < dev_priv->child_dev_num; i++) { + p_child = dev_priv->child_dev + i; + + if (p_child->dvo_port == PORT_IDPD && + p_child->device_type == DEVICE_TYPE_eDP) + return true; + } + return false; +} + +static void +intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + intel_attach_force_audio_property(connector); + intel_attach_broadcast_rgb_property(connector); + intel_dp->color_range_auto = true; + + if (is_edp(intel_dp)) { + drm_mode_create_scaling_mode_property(connector->dev); + drm_object_attach_property( + &connector->base, + connector->dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_ASPECT); + intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + } +} + +static void +intel_dp_init_panel_power_sequencer(struct drm_device *dev, + struct intel_dp *intel_dp, + struct edp_power_seq *out) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct edp_power_seq cur, vbt, spec, final; + u32 pp_on, pp_off, pp_div, pp; + int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg; + + if (HAS_PCH_SPLIT(dev)) { + pp_control_reg = PCH_PP_CONTROL; + pp_on_reg = PCH_PP_ON_DELAYS; + pp_off_reg = PCH_PP_OFF_DELAYS; + pp_div_reg = PCH_PP_DIVISOR; + } else { + pp_control_reg = PIPEA_PP_CONTROL; + pp_on_reg = PIPEA_PP_ON_DELAYS; + pp_off_reg = PIPEA_PP_OFF_DELAYS; + pp_div_reg = PIPEA_PP_DIVISOR; + } + 
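+ /*
+  * Worked example of the units handled below (illustrative): the delays
+  * pulled out of the PP_* registers, the VBT values and the spec
+  * fallbacks are all compared in 100usec units, so a t1_t3 field of
+  * 2100 means 2100 * 100us = 210ms, and get_delay() at the end turns
+  * that back into milliseconds as DIV_ROUND_UP(2100, 10) = 210. The
+  * eDP spec limit below is encoded the same way: 210 * 10 == 2100.
+  */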
+ /* Workaround: Need to write PP_CONTROL with the unlock key as + * the very first thing. */ + pp = ironlake_get_pp_control(intel_dp); + I915_WRITE(pp_control_reg, pp); + + pp_on = I915_READ(pp_on_reg); + pp_off = I915_READ(pp_off_reg); + pp_div = I915_READ(pp_div_reg); + + /* Pull timing values out of registers */ + cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + + cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + + cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; + + DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + + vbt = dev_priv->edp.pps; + + /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of + * our hw here, which are all in 100usec. */ + spec.t1_t3 = 210 * 10; + spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ + spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ + spec.t10 = 500 * 10; + /* This one is special and actually in units of 100ms, but zero + * based in the hw (so we need to add 100 ms). But the sw vbt + * table multiplies it with 1000 to make it in units of 100usec, + * too. */ + spec.t11_t12 = (510 + 100) * 10; + + DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); + + /* Use the max of the register settings and vbt. If both are + * unset, fall back to the spec limits. */ +#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ + spec.field : \ + max(cur.field, vbt.field)) + assign_final(t1_t3); + assign_final(t8); + assign_final(t9); + assign_final(t10); + assign_final(t11_t12); +#undef assign_final + +#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) + intel_dp->panel_power_up_delay = get_delay(t1_t3); + intel_dp->backlight_on_delay = get_delay(t8); + intel_dp->backlight_off_delay = get_delay(t9); + intel_dp->panel_power_down_delay = get_delay(t10); + intel_dp->panel_power_cycle_delay = get_delay(t11_t12); +#undef get_delay + + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); + + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + if (out) + *out = final; +} + +static void +intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, + struct intel_dp *intel_dp, + struct edp_power_seq *seq) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp_on, pp_off, pp_div, port_sel = 0; + int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); + int pp_on_reg, pp_off_reg, pp_div_reg; + + if (HAS_PCH_SPLIT(dev)) { + pp_on_reg = PCH_PP_ON_DELAYS; + pp_off_reg = PCH_PP_OFF_DELAYS; + pp_div_reg = PCH_PP_DIVISOR; + } else { + pp_on_reg = PIPEA_PP_ON_DELAYS; + pp_off_reg = PIPEA_PP_OFF_DELAYS; + pp_div_reg = PIPEA_PP_DIVISOR; + } + + if (IS_VALLEYVIEW(dev)) + port_sel = I915_READ(pp_on_reg) & 0xc0000000; + + /* And finally store the new values in the power sequencer. 
*/ + pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | + (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); + pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | + (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); + /* Compute the divisor for the pp clock, simply match the Bspec + * formula. */ + pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; + pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) + << PANEL_POWER_CYCLE_DELAY_SHIFT); + + /* Haswell doesn't have any port selection bits for the panel + * power sequencer any more. */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { + if (is_cpu_edp(intel_dp)) + port_sel = PANEL_POWER_PORT_DP_A; + else + port_sel = PANEL_POWER_PORT_DP_D; + } + + pp_on |= port_sel; + + I915_WRITE(pp_on_reg, pp_on); + I915_WRITE(pp_off_reg, pp_off); + I915_WRITE(pp_div_reg, pp_div); + + DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", + I915_READ(pp_on_reg), + I915_READ(pp_off_reg), + I915_READ(pp_div_reg)); +} + +void +intel_dp_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector) +{ + struct drm_connector *connector = &intel_connector->base; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *fixed_mode = NULL; + struct edp_power_seq power_seq = { 0 }; + enum port port = intel_dig_port->port; + const char *name = NULL; + int type; + + /* Preserve the current hw state. */ + intel_dp->DP = I915_READ(intel_dp->output_reg); + intel_dp->attached_connector = intel_connector; + + if (HAS_PCH_SPLIT(dev) && port == PORT_D) + if (intel_dpd_is_edp(dev)) + intel_dp->is_pch_edp = true; + + /* + * FIXME : We need to initialize built-in panels before external panels. + * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup + */ + if (IS_VALLEYVIEW(dev) && port == PORT_C) { + type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + } else if (port == PORT_A || is_pch_edp(intel_dp)) { + type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + } else { + /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for + * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't + * rewrite it. + */ + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, + ironlake_panel_vdd_work); + + intel_connector_attach_encoder(intel_connector, intel_encoder); + drm_sysfs_connector_add(connector); + + if (HAS_DDI(dev)) + intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; + else + intel_connector->get_hw_state = intel_connector_get_hw_state; + + intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; + if (HAS_DDI(dev)) { + switch (intel_dig_port->port) { + case PORT_A: + intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; + break; + case PORT_B: + intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; + break; + case PORT_C: + intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; + break; + case PORT_D: + intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; + break; + default: + BUG(); + } + } + + /* Set up the DDC bus. 
*/ + switch (port) { + case PORT_A: + intel_encoder->hpd_pin = HPD_PORT_A; + name = "DPDDC-A"; + break; + case PORT_B: + intel_encoder->hpd_pin = HPD_PORT_B; + name = "DPDDC-B"; + break; + case PORT_C: + intel_encoder->hpd_pin = HPD_PORT_C; + name = "DPDDC-C"; + break; + case PORT_D: + intel_encoder->hpd_pin = HPD_PORT_D; + name = "DPDDC-D"; + break; + default: + BUG(); + } + + if (is_edp(intel_dp)) + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + + intel_dp_i2c_init(intel_dp, intel_connector, name); + + /* Cache DPCD and EDID for edp. */ + if (is_edp(intel_dp)) { + bool ret; + struct drm_display_mode *scan; + struct edid *edid; + + ironlake_edp_panel_vdd_on(intel_dp); + ret = intel_dp_get_dpcd(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp, false); + + if (ret) { + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) + dev_priv->no_aux_handshake = + intel_dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; + } else { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + intel_dp_encoder_destroy(&intel_encoder->base); + intel_dp_destroy(connector); + return; + } + + /* We now know it's not a ghost, init power sequence regs. */ + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + + ironlake_edp_panel_vdd_on(intel_dp); + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + if (drm_add_edid_modes(connector, edid)) { + drm_mode_connector_update_edid_property(connector, edid); + drm_edid_to_eld(connector, edid); + } else { + kfree(edid); + edid = ERR_PTR(-EINVAL); + } + } else { + edid = ERR_PTR(-ENOENT); + } + intel_connector->edid = edid; + + /* prefer fixed mode from EDID if available */ + list_for_each_entry(scan, &connector->probed_modes, head) { + if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { + fixed_mode = drm_mode_duplicate(dev, scan); + break; + } + } + + /* fallback to VBT if available for eDP */ + if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { + fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (fixed_mode) + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + } + + ironlake_edp_panel_vdd_off(intel_dp, false); + } + + if (is_edp(intel_dp)) { + intel_panel_init(&intel_connector->panel, fixed_mode); + intel_panel_setup_backlight(connector); + } + + intel_dp_add_properties(intel_dp, connector); + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. 
+ */ + if (IS_G4X(dev) && !IS_GM45(dev)) { + u32 temp = I915_READ(PEG_BAND_GAP_DATA); + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } +} + +void +intel_dp_init(struct drm_device *dev, int output_reg, enum port port) +{ + struct intel_digital_port *intel_dig_port; + struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; + struct intel_connector *intel_connector; + + intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); + if (!intel_dig_port) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_dig_port); + return; + } + + intel_encoder = &intel_dig_port->base; + encoder = &intel_encoder->base; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); + + intel_encoder->compute_config = intel_dp_compute_config; + intel_encoder->enable = intel_enable_dp; + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->disable = intel_disable_dp; + intel_encoder->post_disable = intel_post_disable_dp; + intel_encoder->get_hw_state = intel_dp_get_hw_state; + + intel_dig_port->port = port; + intel_dig_port->dp.output_reg = output_reg; + + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->cloneable = false; + intel_encoder->hot_plug = intel_dp_hot_plug; + + intel_dp_init_connector(intel_dig_port, intel_connector); +} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h new file mode 100644 index 0000000..7cd5584 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -0,0 +1,732 @@ +/* + * Copyright (c) 2006 Dave Airlie + * Copyright (c) 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#ifndef __INTEL_DRV_H__ +#define __INTEL_DRV_H__ + +#include +#include +#include "i915_drv.h" +#include +#include +#include +#include + +/** + * _wait_for - magic (register) wait macro + * + * Does the right thing for modeset paths when run under kdgb or similar atomic + * contexts. Note that it's important that we check the condition again after + * having timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. 
+ */ +#define _wait_for(COND, MS, W) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + if (!(COND)) \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + if (W && drm_can_sleep()) { \ + msleep(W); \ + } else { \ + cpu_relax(); \ + } \ + } \ + ret__; \ +}) + +#define wait_for(COND, MS) _wait_for(COND, MS, 1) +#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) +#define wait_for_atomic_us(COND, US) _wait_for((COND), \ + DIV_ROUND_UP((US), 1000), 0) + +#define KHz(x) (1000*x) +#define MHz(x) KHz(1000*x) + +/* + * Display related stuff + */ + +/* store information about an Ixxx DVO */ +/* The i830->i865 use multiple DVOs with multiple i2cs */ +/* the i915, i945 have a single sDVO i2c bus - which is different */ +#define MAX_OUTPUTS 6 +/* maximum connectors per crtcs in the mode set */ +#define INTELFB_CONN_LIMIT 4 + +#define INTEL_I2C_BUS_DVO 1 +#define INTEL_I2C_BUS_SDVO 2 + +/* these are outputs from the chip - integrated only + external chips are via DVO or SDVO output */ +#define INTEL_OUTPUT_UNUSED 0 +#define INTEL_OUTPUT_ANALOG 1 +#define INTEL_OUTPUT_DVO 2 +#define INTEL_OUTPUT_SDVO 3 +#define INTEL_OUTPUT_LVDS 4 +#define INTEL_OUTPUT_TVOUT 5 +#define INTEL_OUTPUT_HDMI 6 +#define INTEL_OUTPUT_DISPLAYPORT 7 +#define INTEL_OUTPUT_EDP 8 +#define INTEL_OUTPUT_UNKNOWN 9 + +#define INTEL_DVO_CHIP_NONE 0 +#define INTEL_DVO_CHIP_LVDS 1 +#define INTEL_DVO_CHIP_TMDS 2 +#define INTEL_DVO_CHIP_TVOUT 4 + +struct intel_framebuffer { + struct drm_framebuffer base; + struct drm_i915_gem_object *obj; +}; + +struct intel_fbdev { + struct drm_fb_helper helper; + struct intel_framebuffer ifb; + struct list_head fbdev_list; + struct drm_display_mode *our_mode; +}; + +struct intel_encoder { + struct drm_encoder base; + /* + * The new crtc this encoder will be driven from. Only differs from + * base->crtc while a modeset is in progress. + */ + struct intel_crtc *new_crtc; + + int type; + bool needs_tv_clock; + /* + * Intel hw has only one MUX where encoders could be clone, hence a + * simple flag is enough to compute the possible_clones mask. + */ + bool cloneable; + bool connectors_active; + void (*hot_plug)(struct intel_encoder *); + bool (*compute_config)(struct intel_encoder *, + struct intel_crtc_config *); + void (*pre_pll_enable)(struct intel_encoder *); + void (*pre_enable)(struct intel_encoder *); + void (*enable)(struct intel_encoder *); + void (*mode_set)(struct intel_encoder *intel_encoder); + void (*disable)(struct intel_encoder *); + void (*post_disable)(struct intel_encoder *); + /* Read out the current hw state of this connector, returning true if + * the encoder is active. If the encoder is enabled it also set the pipe + * it is connected to in the pipe parameter. */ + bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); + int crtc_mask; + enum hpd_pin hpd_pin; +}; + +struct intel_panel { + struct drm_display_mode *fixed_mode; + int fitting_mode; +}; + +struct intel_connector { + struct drm_connector base; + /* + * The fixed encoder this connector is connected to. + */ + struct intel_encoder *encoder; + + /* + * The new encoder this connector will be driven. Only differs from + * encoder while a modeset is in progress. + */ + struct intel_encoder *new_encoder; + + /* Reads out the current hw, returning true if the connector is enabled + * and active (i.e. dpms ON state). 
*/ + bool (*get_hw_state)(struct intel_connector *); + + /* Panel info for eDP and LVDS */ + struct intel_panel panel; + + /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ + struct edid *edid; + + /* since POLL and HPD connectors may use the same HPD line keep the native + state of connector->polled in case hotplug storm detection changes it */ + u8 polled; +}; + +struct intel_crtc_config { + struct drm_display_mode requested_mode; + struct drm_display_mode adjusted_mode; + /* This flag must be set by the encoder's compute_config callback if it + * changes the crtc timings in the mode to prevent the crtc fixup from + * overwriting them. Currently only lvds needs that. */ + bool timings_set; + /* Whether to set up the PCH/FDI. Note that we never allow sharing + * between pch encoders and cpu encoders. */ + bool has_pch_encoder; + + /* CPU Transcoder for the pipe. Currently this can only differ from the + * pipe on Haswell (where we have a special eDP transcoder). */ + enum transcoder cpu_transcoder; + + /* + * Use reduced/limited/broadcast rbg range, compressing from the full + * range fed into the crtcs. + */ + bool limited_color_range; + + /* DP has a bunch of special case unfortunately, so mark the pipe + * accordingly. */ + bool has_dp_encoder; + bool dither; + + /* Controls for the clock computation, to override various stages. */ + bool clock_set; + + /* Settings for the intel dpll used on pretty much everything but + * haswell. */ + struct dpll { + unsigned n; + unsigned m1, m2; + unsigned p1, p2; + } dpll; + + int pipe_bpp; + struct intel_link_m_n dp_m_n; + /** + * This is currently used by DP and HDMI encoders since those can have a + * target pixel clock != the port link clock (which is currently stored + * in adjusted_mode->clock). + */ + int pixel_target_clock; + /* Used by SDVO (and if we ever fix it, HDMI). */ + unsigned pixel_multiplier; +}; + +struct intel_crtc { + struct drm_crtc base; + enum pipe pipe; + enum plane plane; + u8 lut_r[256], lut_g[256], lut_b[256]; + /* + * Whether the crtc and the connected output pipeline is active. Implies + * that crtc->enabled is set, i.e. the current mode configuration has + * some outputs connected to this crtc. + */ + bool active; + bool eld_vld; + bool primary_disabled; /* is the crtc obscured by a plane? */ + bool lowfreq_avail; + struct intel_overlay *overlay; + struct intel_unpin_work *unpin_work; + int fdi_lanes; + + atomic_t unpin_work_count; + + /* Display surface base address adjustement for pageflips. Note that on + * gen4+ this only adjusts up to a tile, offsets within a tile are + * handled in the hw itself (with the TILEOFF register). 
*/ + unsigned long dspaddr_offset; + + struct drm_i915_gem_object *cursor_bo; + uint32_t cursor_addr; + int16_t cursor_x, cursor_y; + int16_t cursor_width, cursor_height; + bool cursor_visible; + + struct intel_crtc_config config; + + /* We can share PLLs across outputs if the timings match */ + struct intel_pch_pll *pch_pll; + uint32_t ddi_pll_sel; + + /* reset counter value when the last flip was submitted */ + unsigned int reset_counter; +}; + +struct intel_plane { + struct drm_plane base; + int plane; + enum pipe pipe; + struct drm_i915_gem_object *obj; + bool can_scale; + int max_downscale; + u32 lut_r[1024], lut_g[1024], lut_b[1024]; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y; + uint32_t src_w, src_h; + void (*update_plane)(struct drm_plane *plane, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t x, uint32_t y, + uint32_t src_w, uint32_t src_h); + void (*disable_plane)(struct drm_plane *plane); + int (*update_colorkey)(struct drm_plane *plane, + struct drm_intel_sprite_colorkey *key); + void (*get_colorkey)(struct drm_plane *plane, + struct drm_intel_sprite_colorkey *key); +}; + +struct intel_watermark_params { + unsigned long fifo_size; + unsigned long max_wm; + unsigned long default_wm; + unsigned long guard_size; + unsigned long cacheline_size; +}; + +struct cxsr_latency { + int is_desktop; + int is_ddr3; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; + unsigned long display_hpll_disable; + unsigned long cursor_sr; + unsigned long cursor_hpll_disable; +}; + +#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) +#define to_intel_connector(x) container_of(x, struct intel_connector, base) +#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) +#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define to_intel_plane(x) container_of(x, struct intel_plane, base) + +#define DIP_HEADER_SIZE 5 + +#define DIP_TYPE_AVI 0x82 +#define DIP_VERSION_AVI 0x2 +#define DIP_LEN_AVI 13 +#define DIP_AVI_PR_1 0 +#define DIP_AVI_PR_2 1 +#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) +#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) +#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) + +#define DIP_TYPE_SPD 0x83 +#define DIP_VERSION_SPD 0x1 +#define DIP_LEN_SPD 25 +#define DIP_SPD_UNKNOWN 0 +#define DIP_SPD_DSTB 0x1 +#define DIP_SPD_DVDP 0x2 +#define DIP_SPD_DVHS 0x3 +#define DIP_SPD_HDDVR 0x4 +#define DIP_SPD_DVC 0x5 +#define DIP_SPD_DSC 0x6 +#define DIP_SPD_VCD 0x7 +#define DIP_SPD_GAME 0x8 +#define DIP_SPD_PC 0x9 +#define DIP_SPD_BD 0xa +#define DIP_SPD_SCD 0xb + +struct dip_infoframe { + uint8_t type; /* HB0 */ + uint8_t ver; /* HB1 */ + uint8_t len; /* HB2 - body len, not including checksum */ + uint8_t ecc; /* Header ECC */ + uint8_t checksum; /* PB0 */ + union { + struct { + /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ + uint8_t Y_A_B_S; + /* PB2 - C 7:6, M 5:4, R 3:0 */ + uint8_t C_M_R; + /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ + uint8_t ITC_EC_Q_SC; + /* PB4 - VIC 6:0 */ + uint8_t VIC; + /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */ + uint8_t YQ_CN_PR; + /* PB6 to PB13 */ + uint16_t top_bar_end; + uint16_t bottom_bar_start; + uint16_t left_bar_end; + uint16_t right_bar_start; + } __attribute__ ((packed)) avi; + struct { + uint8_t vn[8]; + uint8_t pd[16]; + uint8_t sdi; + } __attribute__ ((packed)) spd; + uint8_t payload[27]; + } __attribute__ ((packed)) body; +} __attribute__((packed)); + 
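+/*
+ * Illustrative sketch only, not part of the driver: the checksum byte of a
+ * DIP infoframe follows the CEA-861 rule that the three header bytes, the
+ * checksum itself and the payload bytes sum to zero modulo 256. See
+ * intel_dip_infoframe_csum(), declared further down, for the helper the
+ * driver actually uses.
+ */
+static inline uint8_t dip_infoframe_csum_sketch(const struct dip_infoframe *frame)
+{
+ uint8_t sum = frame->type + frame->ver + frame->len;
+ unsigned int i;
+
+ /* frame->len counts the payload bytes, not the checksum byte */
+ for (i = 0; i < frame->len; i++)
+ sum += frame->body.payload[i];
+
+ return 0x100 - sum;
+}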
+struct intel_hdmi { + u32 hdmi_reg; + int ddc_bus; + uint32_t color_range; + bool color_range_auto; + bool has_hdmi_sink; + bool has_audio; + enum hdmi_force_audio force_audio; + bool rgb_quant_range_selectable; + void (*write_infoframe)(struct drm_encoder *encoder, + struct dip_infoframe *frame); + void (*set_infoframes)(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode); +}; + +#define DP_MAX_DOWNSTREAM_PORTS 0x10 +#define DP_LINK_CONFIGURATION_SIZE 9 + +struct intel_dp { + uint32_t output_reg; + uint32_t aux_ch_ctl_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + enum hdmi_force_audio force_audio; + uint32_t color_range; + bool color_range_auto; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; + bool is_pch_edp; + uint8_t train_set[4]; + int panel_power_up_delay; + int panel_power_down_delay; + int panel_power_cycle_delay; + int backlight_on_delay; + int backlight_off_delay; + struct delayed_work panel_vdd_work; + bool want_panel_vdd; + struct intel_connector *attached_connector; +}; + +struct intel_digital_port { + struct intel_encoder base; + enum port port; + u32 saved_port_bits; + struct intel_dp dp; + struct intel_hdmi hdmi; +}; + +static inline struct drm_crtc * +intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->pipe_to_crtc_mapping[pipe]; +} + +static inline struct drm_crtc * +intel_get_crtc_for_plane(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->plane_to_crtc_mapping[plane]; +} + +struct intel_unpin_work { + struct work_struct work; + struct drm_crtc *crtc; + struct drm_i915_gem_object *old_fb_obj; + struct drm_i915_gem_object *pending_flip_obj; + struct drm_pending_vblank_event *event; + atomic_t pending; +#define INTEL_FLIP_INACTIVE 0 +#define INTEL_FLIP_PENDING 1 +#define INTEL_FLIP_COMPLETE 2 + bool enable_stall_check; +}; + +struct intel_fbc_work { + struct delayed_work work; + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + int interval; +}; + +int intel_pch_rawclk(struct drm_device *dev); + +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid); +int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); + +extern void intel_attach_force_audio_property(struct drm_connector *connector); +extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); + +extern void intel_crt_init(struct drm_device *dev); +extern void intel_hdmi_init(struct drm_device *dev, + int hdmi_reg, enum port port); +extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector); +extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); +extern bool intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); +extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); +extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, + bool is_sdvob); +extern void intel_dvo_init(struct drm_device *dev); +extern void intel_tv_init(struct drm_device *dev); +extern void intel_mark_busy(struct drm_device *dev); +extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); +extern void intel_mark_idle(struct drm_device *dev); 
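+/*
+ * Usage sketch (illustrative): AUX traffic to an eDP panel is bracketed with
+ * the VDD helpers declared below, mirroring intel_dp_probe_oui() and the eDP
+ * probe in intel_dp_init_connector():
+ *
+ * ironlake_edp_panel_vdd_on(intel_dp);
+ * ret = intel_dp_get_dpcd(intel_dp);
+ * ironlake_edp_panel_vdd_off(intel_dp, false);
+ *
+ * so the panel is powered far enough to answer on the AUX channel without
+ * being fully switched on.
+ */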
+extern bool intel_lvds_init(struct drm_device *dev); +extern bool intel_is_dual_link_lvds(struct drm_device *dev); +extern void intel_dp_init(struct drm_device *dev, int output_reg, + enum port port); +extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector); +extern void intel_dp_init_link_config(struct intel_dp *intel_dp); +extern void intel_dp_start_link_train(struct intel_dp *intel_dp); +extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); +extern void intel_dp_stop_link_train(struct intel_dp *intel_dp); +extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); +extern void intel_dp_check_link_status(struct intel_dp *intel_dp); +extern bool intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); +extern bool intel_dpd_is_edp(struct drm_device *dev); +extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); +extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp); +extern void ironlake_edp_panel_on(struct intel_dp *intel_dp); +extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); +extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); +extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); +extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); +extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); +extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane); + +/* intel_panel.c */ +extern int intel_panel_init(struct intel_panel *panel, + struct drm_display_mode *fixed_mode); +extern void intel_panel_fini(struct intel_panel *panel); + +extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, + struct drm_display_mode *adjusted_mode); +extern void intel_pch_panel_fitting(struct drm_device *dev, + int fitting_mode, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +extern u32 intel_panel_get_max_backlight(struct drm_device *dev); +extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); +extern int intel_panel_setup_backlight(struct drm_connector *connector); +extern void intel_panel_enable_backlight(struct drm_device *dev, + enum pipe pipe); +extern void intel_panel_disable_backlight(struct drm_device *dev); +extern void intel_panel_destroy_backlight(struct drm_device *dev); +extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); + +struct intel_set_config { + struct drm_encoder **save_connector_encoders; + struct drm_crtc **save_encoder_crtcs; + + bool fb_changed; + bool mode_changed; +}; + +extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *old_fb); +extern void intel_modeset_disable(struct drm_device *dev); +extern void intel_crtc_restore_mode(struct drm_crtc *crtc); +extern void intel_crtc_load_lut(struct drm_crtc *crtc); +extern void intel_crtc_update_dpms(struct drm_crtc *crtc); +extern void intel_encoder_destroy(struct drm_encoder *encoder); +extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); +extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); +extern void intel_connector_dpms(struct drm_connector *, int mode); +extern bool intel_connector_get_hw_state(struct intel_connector *connector); +extern void intel_modeset_check_state(struct drm_device *dev); 
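[Illustrative note, not part of the patch] intel_panel_get_max_backlight() reports the hardware maximum and intel_panel_set_backlight() takes an absolute level, so a caller would typically scale a user-facing percentage against the reported maximum and clamp it before calling the driver. A hypothetical standalone helper showing only that arithmetic (the function name, the example maximum and the percentage interface are assumptions for the sketch, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: map a 0-100 percentage onto 0..max, the range a
 * caller would pass to intel_panel_set_backlight(). */
static uint32_t backlight_level_from_percent(uint32_t max, uint32_t percent)
{
	if (percent > 100)
		percent = 100;
	/* Round to nearest rather than truncating. */
	return (max * percent + 50) / 100;
}

int main(void)
{
	uint32_t max = 4648; /* example value; the real maximum is hardware/PWM dependent */

	printf("50%%  -> %u\n", (unsigned)backlight_level_from_percent(max, 50));
	printf("120%% -> %u (clamped)\n", (unsigned)backlight_level_from_percent(max, 120));
	return 0;
}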
+extern void intel_plane_restore(struct drm_plane *plane); + + +static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) +{ + return to_intel_connector(connector)->encoder; +} + +static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) +{ + struct intel_digital_port *intel_dig_port = + container_of(encoder, struct intel_digital_port, base.base); + return &intel_dig_port->dp; +} + +static inline struct intel_digital_port * +enc_to_dig_port(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_digital_port, base.base); +} + +static inline struct intel_digital_port * +dp_to_dig_port(struct intel_dp *intel_dp) +{ + return container_of(intel_dp, struct intel_digital_port, dp); +} + +static inline struct intel_digital_port * +hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) +{ + return container_of(intel_hdmi, struct intel_digital_port, hdmi); +} + +bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port); + +extern void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder); +extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); + +extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, + struct drm_crtc *crtc); +int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern enum transcoder +intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe); +extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); +extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); +extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); + +struct intel_load_detect_pipe { + struct drm_framebuffer *release_fb; + bool load_detect_temp; + int dpms_mode; +}; +extern bool intel_get_load_detect_pipe(struct drm_connector *connector, + struct drm_display_mode *mode, + struct intel_load_detect_pipe *old); +extern void intel_release_load_detect_pipe(struct drm_connector *connector, + struct intel_load_detect_pipe *old); + +extern void intelfb_restore(void); +extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno); +extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno); +extern void intel_enable_clock_gating(struct drm_device *dev); + +extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, + struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined); +extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); + +extern int intel_framebuffer_init(struct drm_device *dev, + struct intel_framebuffer *ifb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_i915_gem_object *obj); +extern int intel_fbdev_init(struct drm_device *dev); +extern void intel_fbdev_initial_config(struct drm_device *dev); +extern void intel_fbdev_fini(struct drm_device *dev); +extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); +extern void intel_prepare_page_flip(struct drm_device *dev, int plane); +extern void intel_finish_page_flip(struct drm_device *dev, int pipe); +extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); + +extern void intel_setup_overlay(struct drm_device *dev); +extern void intel_cleanup_overlay(struct drm_device *dev); +extern int intel_overlay_switch_off(struct intel_overlay *overlay); +extern int intel_overlay_put_image(struct drm_device *dev, void 
*data, + struct drm_file *file_priv); +extern int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +extern void intel_fb_output_poll_changed(struct drm_device *dev); +extern void intel_fb_restore_mode(struct drm_device *dev); + +extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, + bool state); +#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) +#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) + +extern void intel_init_clock_gating(struct drm_device *dev); +extern void intel_write_eld(struct drm_encoder *encoder, + struct drm_display_mode *mode); +extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); +extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); +extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); +extern void intel_prepare_ddi(struct drm_device *dev); +extern void hsw_fdi_link_train(struct drm_crtc *crtc); +extern void intel_ddi_init(struct drm_device *dev, enum port port); + +/* For use by IVB LP watermark workaround in intel_sprite.c */ +extern void intel_update_watermarks(struct drm_device *dev); +extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, + uint32_t sprite_width, + int pixel_size); +extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, + struct drm_display_mode *mode); + +extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, + unsigned int tiling_mode, + unsigned int bpp, + unsigned int pitch); + +extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg); + +/* Power-related functions, located in intel_pm.c */ +extern void intel_init_pm(struct drm_device *dev); +/* FBC */ +extern bool intel_fbc_enabled(struct drm_device *dev); +extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); +extern void intel_update_fbc(struct drm_device *dev); +/* IPS */ +extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); +extern void intel_gpu_ips_teardown(void); + +extern bool intel_using_power_well(struct drm_device *dev); +extern void intel_init_power_well(struct drm_device *dev); +extern void intel_set_power_well(struct drm_device *dev, bool enable); +extern void intel_enable_gt_powersave(struct drm_device *dev); +extern void intel_disable_gt_powersave(struct drm_device *dev); +extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); +extern void ironlake_teardown_rc6(struct drm_device *dev); + +extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe); +extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv); +extern void intel_ddi_pll_init(struct drm_device *dev); +extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc); +extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder); +extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); +extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); +extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); +extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); +extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); +extern void 
intel_ddi_set_pipe_settings(struct drm_crtc *crtc); +extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); +extern bool +intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); +extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); + +extern void intel_display_handle_reset(struct drm_device *dev); + +#endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c new file mode 100644 index 0000000..cc70b16 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -0,0 +1,533 @@ +/* + * Copyright 2006 Dave Airlie + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + */ +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" +#include "dvo.h" + +#define SIL164_ADDR 0x38 +#define CH7xxx_ADDR 0x76 +#define TFP410_ADDR 0x38 +#define NS2501_ADDR 0x38 + +static const struct intel_dvo_device intel_dvo_devices[] = { + { + .type = INTEL_DVO_CHIP_TMDS, + .name = "sil164", + .dvo_reg = DVOC, + .slave_addr = SIL164_ADDR, + .dev_ops = &sil164_ops, + }, + { + .type = INTEL_DVO_CHIP_TMDS, + .name = "ch7xxx", + .dvo_reg = DVOC, + .slave_addr = CH7xxx_ADDR, + .dev_ops = &ch7xxx_ops, + }, + { + .type = INTEL_DVO_CHIP_LVDS, + .name = "ivch", + .dvo_reg = DVOA, + .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */ + .dev_ops = &ivch_ops, + }, + { + .type = INTEL_DVO_CHIP_TMDS, + .name = "tfp410", + .dvo_reg = DVOC, + .slave_addr = TFP410_ADDR, + .dev_ops = &tfp410_ops, + }, + { + .type = INTEL_DVO_CHIP_LVDS, + .name = "ch7017", + .dvo_reg = DVOC, + .slave_addr = 0x75, + .gpio = GMBUS_PORT_DPB, + .dev_ops = &ch7017_ops, + }, + { + .type = INTEL_DVO_CHIP_TMDS, + .name = "ns2501", + .dvo_reg = DVOC, + .slave_addr = NS2501_ADDR, + .dev_ops = &ns2501_ops, + } +}; + +struct intel_dvo { + struct intel_encoder base; + + struct intel_dvo_device dev; + + struct drm_display_mode *panel_fixed_mode; + bool panel_wants_dither; +}; + +static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_dvo, base.base); +} + +static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_dvo, base); +} + +static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) +{ + struct intel_dvo *intel_dvo = 
intel_attached_dvo(&connector->base); + + return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); +} + +static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + u32 tmp; + + tmp = I915_READ(intel_dvo->dev.dvo_reg); + + if (!(tmp & DVO_ENABLE)) + return false; + + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_disable_dvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + u32 dvo_reg = intel_dvo->dev.dvo_reg; + u32 temp = I915_READ(dvo_reg); + + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); + I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); + I915_READ(dvo_reg); +} + +static void intel_enable_dvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + u32 dvo_reg = intel_dvo->dev.dvo_reg; + u32 temp = I915_READ(dvo_reg); + + I915_WRITE(dvo_reg, temp | DVO_ENABLE); + I915_READ(dvo_reg); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); +} + +static void intel_dvo_dpms(struct drm_connector *connector, int mode) +{ + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct drm_crtc *crtc; + + /* dvo supports only 2 dpms states. */ + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = intel_dvo->base.base.crtc; + if (!crtc) { + intel_dvo->base.connectors_active = false; + return; + } + + if (mode == DRM_MODE_DPMS_ON) { + intel_dvo->base.connectors_active = true; + + intel_crtc_update_dpms(crtc); + + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); + } else { + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); + + intel_dvo->base.connectors_active = false; + + intel_crtc_update_dpms(crtc); + } + + intel_modeset_check_state(connector->dev); +} + +static int intel_dvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + /* XXX: Validate clock range */ + + if (intel_dvo->panel_fixed_mode) { + if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) + return MODE_PANEL; + } + + return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); +} + +static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + + /* If we have timings from the BIOS for the panel, put them in + * to the adjusted mode. The CRTC will be set up for this mode, + * with the panel scaling set up to source from the H/VDisplay + * of the original mode. 
+ */ + if (intel_dvo->panel_fixed_mode != NULL) { +#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x + C(hdisplay); + C(hsync_start); + C(hsync_end); + C(htotal); + C(vdisplay); + C(vsync_start); + C(vsync_end); + C(vtotal); + C(clock); +#undef C + } + + if (intel_dvo->dev.dev_ops->mode_fixup) + return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); + + return true; +} + +static void intel_dvo_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + int pipe = intel_crtc->pipe; + u32 dvo_val; + u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; + int dpll_reg = DPLL(pipe); + + switch (dvo_reg) { + case DVOA: + default: + dvo_srcdim_reg = DVOA_SRCDIM; + break; + case DVOB: + dvo_srcdim_reg = DVOB_SRCDIM; + break; + case DVOC: + dvo_srcdim_reg = DVOC_SRCDIM; + break; + } + + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); + + /* Save the data order, since I don't know what it should be set to. */ + dvo_val = I915_READ(dvo_reg) & + (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); + dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | + DVO_BLANK_ACTIVE_HIGH; + + if (pipe == 1) + dvo_val |= DVO_PIPE_B_SELECT; + dvo_val |= DVO_PIPE_STALL; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + dvo_val |= DVO_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + dvo_val |= DVO_VSYNC_ACTIVE_HIGH; + + I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); + + /*I915_WRITE(DVOB_SRCDIM, + (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | + (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ + I915_WRITE(dvo_srcdim_reg, + (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | + (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); + /*I915_WRITE(DVOB, dvo_val);*/ + I915_WRITE(dvo_reg, dvo_val); +} + +/** + * Detect the output connection on our DVO device. + * + * Unimplemented. + */ +static enum drm_connector_status +intel_dvo_detect(struct drm_connector *connector, bool force) +{ + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); +} + +static int intel_dvo_get_modes(struct drm_connector *connector) +{ + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + + /* We should probably have an i2c driver get_modes function for those + * devices which will have a fixed set of modes determined by the chip + * (TV-out, for example), but for now with just TMDS and LVDS, + * that's not the case. 
+ */ + intel_ddc_get_modes(connector, + intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC)); + if (!list_empty(&connector->probed_modes)) + return 1; + + if (intel_dvo->panel_fixed_mode != NULL) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode); + if (mode) { + drm_mode_probed_add(connector, mode); + return 1; + } + } + + return 0; +} + +static void intel_dvo_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + .mode_fixup = intel_dvo_mode_fixup, + .mode_set = intel_dvo_mode_set, +}; + +static const struct drm_connector_funcs intel_dvo_connector_funcs = { + .dpms = intel_dvo_dpms, + .detect = intel_dvo_detect, + .destroy = intel_dvo_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, +}; + +static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { + .mode_valid = intel_dvo_mode_valid, + .get_modes = intel_dvo_get_modes, + .best_encoder = intel_best_encoder, +}; + +static void intel_dvo_enc_destroy(struct drm_encoder *encoder) +{ + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + + if (intel_dvo->dev.dev_ops->destroy) + intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); + + kfree(intel_dvo->panel_fixed_mode); + + intel_encoder_destroy(encoder); +} + +static const struct drm_encoder_funcs intel_dvo_enc_funcs = { + .destroy = intel_dvo_enc_destroy, +}; + +/** + * Attempts to get a fixed panel timing for LVDS (currently only the i830). + * + * Other chips with DVO LVDS will need to extend this to deal with the LVDS + * chip being on DVOB/C and having multiple pipes. + */ +static struct drm_display_mode * +intel_dvo_get_current_mode(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); + struct drm_display_mode *mode = NULL; + + /* If the DVO port is active, that'll be the LVDS, so we can pull out + * its timings to get how the BIOS set up the panel. + */ + if (dvo_val & DVO_ENABLE) { + struct drm_crtc *crtc; + int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 
1 : 0; + + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc) { + mode = intel_crtc_mode_get(dev, crtc); + if (mode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) + mode->flags |= DRM_MODE_FLAG_PHSYNC; + if (dvo_val & DVO_VSYNC_ACTIVE_HIGH) + mode->flags |= DRM_MODE_FLAG_PVSYNC; + } + } + } + + return mode; +} + +void intel_dvo_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + struct intel_dvo *intel_dvo; + struct intel_connector *intel_connector; + int i; + int encoder_type = DRM_MODE_ENCODER_NONE; + + intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); + if (!intel_dvo) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_dvo); + return; + } + + intel_encoder = &intel_dvo->base; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type); + + intel_encoder->disable = intel_disable_dvo; + intel_encoder->enable = intel_enable_dvo; + intel_encoder->get_hw_state = intel_dvo_get_hw_state; + intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; + + /* Now, try to find a controller */ + for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { + struct drm_connector *connector = &intel_connector->base; + const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; + struct i2c_adapter *i2c; + int gpio; + bool dvoinit; + + /* Allow the I2C driver info to specify the GPIO to be used in + * special cases, but otherwise default to what's defined + * in the spec. + */ + if (intel_gmbus_is_port_valid(dvo->gpio)) + gpio = dvo->gpio; + else if (dvo->type == INTEL_DVO_CHIP_LVDS) + gpio = GMBUS_PORT_SSC; + else + gpio = GMBUS_PORT_DPB; + + /* Set up the I2C bus necessary for the chip we're probing. + * It appears that everything is on GPIOE except for panels + * on i830 laptops, which are on GPIOB (DVOA). + */ + i2c = intel_gmbus_get_adapter(dev_priv, gpio); + + intel_dvo->dev = *dvo; + + /* GMBUS NAK handling seems to be unstable, hence let the + * transmitter detection run in bit banging mode for now. + */ + intel_gmbus_force_bit(i2c, true); + + dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c); + + intel_gmbus_force_bit(i2c, false); + + if (!dvoinit) + continue; + + intel_encoder->type = INTEL_OUTPUT_DVO; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + switch (dvo->type) { + case INTEL_DVO_CHIP_TMDS: + intel_encoder->cloneable = true; + drm_connector_init(dev, connector, + &intel_dvo_connector_funcs, + DRM_MODE_CONNECTOR_DVII); + encoder_type = DRM_MODE_ENCODER_TMDS; + break; + case INTEL_DVO_CHIP_LVDS: + intel_encoder->cloneable = false; + drm_connector_init(dev, connector, + &intel_dvo_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + encoder_type = DRM_MODE_ENCODER_LVDS; + break; + } + + drm_connector_helper_add(connector, + &intel_dvo_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + drm_encoder_helper_add(&intel_encoder->base, + &intel_dvo_helper_funcs); + + intel_connector_attach_encoder(intel_connector, intel_encoder); + if (dvo->type == INTEL_DVO_CHIP_LVDS) { + /* For our LVDS chipsets, we should hopefully be able + * to dig the fixed panel mode out of the BIOS data. 
+ * However, it's in a different format from the BIOS + * data on chipsets with integrated LVDS (stored in AIM + * headers, likely), so for now, just get the current + * mode being output through DVO. + */ + intel_dvo->panel_fixed_mode = + intel_dvo_get_current_mode(connector); + intel_dvo->panel_wants_dither = true; + } + + drm_sysfs_connector_add(connector); + return; + } + + drm_encoder_cleanup(&intel_encoder->base); + kfree(intel_dvo); + kfree(intel_connector); +} diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c new file mode 100644 index 0000000..93912e8 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -0,0 +1,313 @@ +/* + * Copyright © 2007 David Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * David Airlie + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +static struct fb_ops intelfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static int intelfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; + struct drm_device *dev = ifbdev->helper.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct fb_info *info; + struct drm_framebuffer *fb; + struct drm_mode_fb_cmd2 mode_cmd = {}; + struct drm_i915_gem_object *obj; + struct device *device = &dev->pdev->dev; + int size, ret; + + /* we don't do packed 24bpp */ + if (sizes->surface_bpp == 24) + sizes->surface_bpp = 32; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / + 8), 64); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = ALIGN(size, PAGE_SIZE); + obj = i915_gem_object_create_stolen(dev, size); + if (obj == NULL) + obj = i915_gem_alloc_object(dev, size); + if (!obj) { + DRM_ERROR("failed to allocate framebuffer\n"); + ret = -ENOMEM; + goto out; + } + + mutex_lock(&dev->struct_mutex); + + /* Flush everything out, we'll be doing GTT only from now on */ + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret) { + DRM_ERROR("failed to pin fb: %d\n", ret); + goto out_unref; + } + + info = framebuffer_alloc(0, device); + if (!info) { + ret = -ENOMEM; + goto out_unpin; + } + + info->par = ifbdev; + + ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); + if (ret) + goto out_unpin; + + fb = &ifbdev->ifb.base; + + ifbdev->helper.fb = fb; + ifbdev->helper.fbdev = info; + + strcpy(info->fix.id, "inteldrmfb"); + + info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; + info->fbops = &intelfb_ops; + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out_unpin; + } + /* setup aperture base/size for vesafb takeover */ + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out_unpin; + } + info->apertures->ranges[0].base = dev->mode_config.fb_base; + info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; + + info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; + info->fix.smem_len = size; + + info->screen_base = + ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + size); + if (!info->screen_base) { + ret = -ENOSPC; + goto out_unpin; + } + info->screen_size = size; + + /* This driver doesn't need a VT switch to restore the mode on resume */ + fb_enable_skip_vt_switch(info); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); + + /* If the object is shmemfs backed, it will have given us zeroed pages. 
+ * If the object is stolen however, it will be full of whatever + * garbage was left in there. + */ + if (ifbdev->ifb.obj->stolen) + memset_io(info->screen_base, 0, info->screen_size); + + /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ + + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", + fb->width, fb->height, + obj->gtt_offset, obj); + + + mutex_unlock(&dev->struct_mutex); + vga_switcheroo_client_fb_set(dev->pdev, info); + return 0; + +out_unpin: + i915_gem_object_unpin(obj); +out_unref: + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); +out: + return ret; +} + +static struct drm_fb_helper_funcs intel_fb_helper_funcs = { + .gamma_set = intel_crtc_fb_gamma_set, + .gamma_get = intel_crtc_fb_gamma_get, + .fb_probe = intelfb_create, +}; + +static void intel_fbdev_destroy(struct drm_device *dev, + struct intel_fbdev *ifbdev) +{ + struct fb_info *info; + struct intel_framebuffer *ifb = &ifbdev->ifb; + + if (ifbdev->helper.fbdev) { + info = ifbdev->helper.fbdev; + unregister_framebuffer(info); + iounmap(info->screen_base); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + drm_fb_helper_fini(&ifbdev->helper); + + drm_framebuffer_unregister_private(&ifb->base); + drm_framebuffer_cleanup(&ifb->base); + if (ifb->obj) { + drm_gem_object_unreference_unlocked(&ifb->obj->base); + ifb->obj = NULL; + } +} + +int intel_fbdev_init(struct drm_device *dev) +{ + struct intel_fbdev *ifbdev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); + if (!ifbdev) + return -ENOMEM; + + dev_priv->fbdev = ifbdev; + ifbdev->helper.funcs = &intel_fb_helper_funcs; + + ret = drm_fb_helper_init(dev, &ifbdev->helper, + INTEL_INFO(dev)->num_pipes, + INTELFB_CONN_LIMIT); + if (ret) { + kfree(ifbdev); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&ifbdev->helper); + + return 0; +} + +void intel_fbdev_initial_config(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* Due to peculiar init order wrt to hpd handling this is separate. */ + drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); +} + +void intel_fbdev_fini(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (!dev_priv->fbdev) + return; + + intel_fbdev_destroy(dev, dev_priv->fbdev); + kfree(dev_priv->fbdev); + dev_priv->fbdev = NULL; +} + +void intel_fbdev_set_suspend(struct drm_device *dev, int state) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct fb_info *info; + + if (!ifbdev) + return; + + info = ifbdev->helper.fbdev; + + /* On resume from hibernation: If the object is shmemfs backed, it has + * been restored from swap. If the object is stolen however, it will be + * full of whatever garbage was left in there. 
+ */ + if (!state && ifbdev->ifb.obj->stolen) + memset_io(info->screen_base, 0, info->screen_size); + + fb_set_suspend(info, state); +} + +MODULE_LICENSE("GPL and additional rights"); + +void intel_fb_output_poll_changed(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); +} + +void intel_fb_restore_mode(struct drm_device *dev) +{ + int ret; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_mode_config *config = &dev->mode_config; + struct drm_plane *plane; + + if (INTEL_INFO(dev)->num_pipes == 0) + return; + + drm_modeset_lock_all(dev); + + ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); + + /* Be sure to shut off any planes that may be active */ + list_for_each_entry(plane, &config->plane_list, head) + if (plane->enabled) + plane->funcs->disable_plane(plane); + + drm_modeset_unlock_all(dev); +} diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c new file mode 100644 index 0000000..a905793 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -0,0 +1,1107 @@ +/* + * Copyright 2006 Dave Airlie + * Copyright © 2006-2009 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Jesse Barnes + */ + +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) +{ + return hdmi_to_dig_port(intel_hdmi)->base.base.dev; +} + +static void +assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) +{ + struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t enabled_bits; + + enabled_bits = HAS_DDI(dev) ? 
DDI_BUF_CTL_ENABLE : SDVO_ENABLE; + + WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, + "HDMI port enabled, expecting disabled\n"); +} + +struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) +{ + struct intel_digital_port *intel_dig_port = + container_of(encoder, struct intel_digital_port, base.base); + return &intel_dig_port->hdmi; +} + +static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) +{ + return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); +} + +void intel_dip_infoframe_csum(struct dip_infoframe *frame) +{ + uint8_t *data = (uint8_t *)frame; + uint8_t sum = 0; + unsigned i; + + frame->checksum = 0; + frame->ecc = 0; + + for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) + sum += data[i]; + + frame->checksum = 0x100 - sum; +} + +static u32 g4x_infoframe_index(struct dip_infoframe *frame) +{ + switch (frame->type) { + case DIP_TYPE_AVI: + return VIDEO_DIP_SELECT_AVI; + case DIP_TYPE_SPD: + return VIDEO_DIP_SELECT_SPD; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } +} + +static u32 g4x_infoframe_enable(struct dip_infoframe *frame) +{ + switch (frame->type) { + case DIP_TYPE_AVI: + return VIDEO_DIP_ENABLE_AVI; + case DIP_TYPE_SPD: + return VIDEO_DIP_ENABLE_SPD; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } +} + +static u32 hsw_infoframe_enable(struct dip_infoframe *frame) +{ + switch (frame->type) { + case DIP_TYPE_AVI: + return VIDEO_DIP_ENABLE_AVI_HSW; + case DIP_TYPE_SPD: + return VIDEO_DIP_ENABLE_SPD_HSW; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } +} + +static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, + enum transcoder cpu_transcoder) +{ + switch (frame->type) { + case DIP_TYPE_AVI: + return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); + case DIP_TYPE_SPD: + return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + return 0; + } +} + +static void g4x_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val = I915_READ(VIDEO_DIP_CTL); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + + val &= ~g4x_infoframe_enable(frame); + + I915_WRITE(VIDEO_DIP_CTL, val); + + mmiowb(); + for (i = 0; i < len; i += 4) { + I915_WRITE(VIDEO_DIP_DATA, *data); + data++; + } + /* Write every possible data byte to force correct ECC calculation. 
*/ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(VIDEO_DIP_DATA, 0); + mmiowb(); + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(VIDEO_DIP_CTL, val); + POSTING_READ(VIDEO_DIP_CTL); +} + +static void ibx_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(reg); + + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + + val &= ~g4x_infoframe_enable(frame); + + I915_WRITE(reg, val); + + mmiowb(); + for (i = 0; i < len; i += 4) { + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + mmiowb(); + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +static void cpt_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(reg); + + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + + /* The DIP control register spec says that we need to update the AVI + * infoframe without clearing its enable bit */ + if (frame->type != DIP_TYPE_AVI) + val &= ~g4x_infoframe_enable(frame); + + I915_WRITE(reg, val); + + mmiowb(); + for (i = 0; i < len; i += 4) { + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + mmiowb(); + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +static void vlv_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(reg); + + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(frame); + + val &= ~g4x_infoframe_enable(frame); + + I915_WRITE(reg, val); + + mmiowb(); + for (i = 0; i < len; i += 4) { + I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + /* Write every possible data byte to force correct ECC calculation. 
*/ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + mmiowb(); + + val |= g4x_infoframe_enable(frame); + val &= ~VIDEO_DIP_FREQ_MASK; + val |= VIDEO_DIP_FREQ_VSYNC; + + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +static void hsw_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); + u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder); + unsigned int i, len = DIP_HEADER_SIZE + frame->len; + u32 val = I915_READ(ctl_reg); + + if (data_reg == 0) + return; + + val &= ~hsw_infoframe_enable(frame); + I915_WRITE(ctl_reg, val); + + mmiowb(); + for (i = 0; i < len; i += 4) { + I915_WRITE(data_reg + i, *data); + data++; + } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(data_reg + i, 0); + mmiowb(); + + val |= hsw_infoframe_enable(frame); + I915_WRITE(ctl_reg, val); + POSTING_READ(ctl_reg); +} + +static void intel_set_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + intel_dip_infoframe_csum(frame); + intel_hdmi->write_infoframe(encoder, frame); +} + +static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; + + if (intel_hdmi->rgb_quant_range_selectable) { + if (intel_crtc->config.limited_color_range) + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + else + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + } + + avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); + + intel_set_infoframe(encoder, &avi_if); +} + +static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe spd_if; + + memset(&spd_if, 0, sizeof(spd_if)); + spd_if.type = DIP_TYPE_SPD; + spd_if.ver = DIP_VERSION_SPD; + spd_if.len = DIP_LEN_SPD; + strcpy(spd_if.body.spd.vn, "Intel"); + strcpy(spd_if.body.spd.pd, "Integrated gfx"); + spd_if.body.spd.sdi = DIP_SPD_PC; + + intel_set_infoframe(encoder, &spd_if); +} + +static void g4x_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + u32 reg = VIDEO_DIP_CTL; + u32 val = I915_READ(reg); + u32 port; + + assert_hdmi_port_disabled(intel_hdmi); + + /* If the registers were not initialized yet, they might be zeroes, + * which means we're selecting the AVI DIP and we're setting its + * frequency to once. This seems to really confuse the HW and make + * things stop working (the register spec says the AVI always needs to + * be sent every VSync). 
So here we avoid writing to the register more + * than we need and also explicitly select the AVI DIP and explicitly + * set its frequency to every VSync. Avoiding to write it twice seems to + * be enough to solve the problem, but being defensive shouldn't hurt us + * either. */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + return; + } + + switch (intel_dig_port->port) { + case PORT_B: + port = VIDEO_DIP_PORT_B; + break; + case PORT_C: + port = VIDEO_DIP_PORT_C; + break; + default: + BUG(); + return; + } + + if (port != (val & VIDEO_DIP_PORT_MASK)) { + if (val & VIDEO_DIP_ENABLE) { + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + } + val &= ~VIDEO_DIP_PORT_MASK; + val |= port; + } + + val |= VIDEO_DIP_ENABLE; + val &= ~VIDEO_DIP_ENABLE_VENDOR; + + I915_WRITE(reg, val); + POSTING_READ(reg); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void ibx_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + u32 port; + + assert_hdmi_port_disabled(intel_hdmi); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + return; + } + + switch (intel_dig_port->port) { + case PORT_B: + port = VIDEO_DIP_PORT_B; + break; + case PORT_C: + port = VIDEO_DIP_PORT_C; + break; + case PORT_D: + port = VIDEO_DIP_PORT_D; + break; + default: + BUG(); + return; + } + + if (port != (val & VIDEO_DIP_PORT_MASK)) { + if (val & VIDEO_DIP_ENABLE) { + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + } + val &= ~VIDEO_DIP_PORT_MASK; + val |= port; + } + + val |= VIDEO_DIP_ENABLE; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); + + I915_WRITE(reg, val); + POSTING_READ(reg); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void cpt_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + assert_hdmi_port_disabled(intel_hdmi); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); + I915_WRITE(reg, val); + POSTING_READ(reg); + return; + } + + /* Set both together, unset both together: see the spec. 
*/ + val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); + + I915_WRITE(reg, val); + POSTING_READ(reg); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void vlv_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + assert_hdmi_port_disabled(intel_hdmi); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + return; + } + + val |= VIDEO_DIP_ENABLE; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); + + I915_WRITE(reg, val); + POSTING_READ(reg); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void hsw_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); + u32 val = I915_READ(reg); + + assert_hdmi_port_disabled(intel_hdmi); + + if (!intel_hdmi->has_hdmi_sink) { + I915_WRITE(reg, 0); + POSTING_READ(reg); + return; + } + + val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW | + VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW); + + I915_WRITE(reg, val); + POSTING_READ(reg); + + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void intel_hdmi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 hdmi_val; + + hdmi_val = SDVO_ENCODING_HDMI; + if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) + hdmi_val |= intel_hdmi->color_range; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; + + if (intel_crtc->config.pipe_bpp > 24) + hdmi_val |= HDMI_COLOR_FORMAT_12bpc; + else + hdmi_val |= SDVO_COLOR_FORMAT_8bpc; + + /* Required on CPT */ + if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) + hdmi_val |= HDMI_MODE_SELECT_HDMI; + + if (intel_hdmi->has_audio) { + DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", + pipe_name(intel_crtc->pipe)); + hdmi_val |= SDVO_AUDIO_ENABLE; + hdmi_val |= HDMI_MODE_SELECT_HDMI; + intel_write_eld(encoder, adjusted_mode); + } + + if (HAS_PCH_CPT(dev)) + hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); + else + hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe); + + I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); + POSTING_READ(intel_hdmi->hdmi_reg); + + intel_hdmi->set_infoframes(encoder, adjusted_mode); +} + +static bool intel_hdmi_get_hw_state(struct intel_encoder 
*encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 tmp; + + tmp = I915_READ(intel_hdmi->hdmi_reg); + + if (!(tmp & SDVO_ENABLE)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_enable_hdmi(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 temp; + u32 enable_bits = SDVO_ENABLE; + + if (intel_hdmi->has_audio) + enable_bits |= SDVO_AUDIO_ENABLE; + + temp = I915_READ(intel_hdmi->hdmi_reg); + + /* HW workaround for IBX, we need to move the port to transcoder A + * before disabling it, so restore the transcoder select bit here. */ + if (HAS_PCH_IBX(dev)) + enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe); + + /* HW workaround, need to toggle enable bit off and on for 12bpc, but + * we do this anyway which shows more stable in testing. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->hdmi_reg); + } + + temp |= enable_bits; + + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + + /* HW workaround, need to write this twice for issue that may result + * in first write getting masked. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + } +} + +static void intel_disable_hdmi(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 temp; + u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; + + temp = I915_READ(intel_hdmi->hdmi_reg); + + /* HW workaround for IBX, we need to move the port to transcoder A + * before disabling it. */ + if (HAS_PCH_IBX(dev)) { + struct drm_crtc *crtc = encoder->base.crtc; + int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; + + if (temp & SDVO_PIPE_B_SELECT) { + temp &= ~SDVO_PIPE_B_SELECT; + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + + /* Again we need to write this twice. */ + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + + /* Transcoder selection bits only update + * effectively on vblank. */ + if (crtc) + intel_wait_for_vblank(dev, pipe); + else + msleep(50); + } + } + + /* HW workaround, need to toggle enable bit off and on for 12bpc, but + * we do this anyway which shows more stable in testing. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->hdmi_reg); + } + + temp &= ~enable_bits; + + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + + /* HW workaround, need to write this twice for issue that may result + * in first write getting masked. 
+ */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); + } +} + +static int intel_hdmi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + if (mode->clock > 165000) + return MODE_CLOCK_HIGH; + if (mode->clock < 20000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + return MODE_OK; +} + +bool intel_hdmi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + + if (intel_hdmi->color_range_auto) { + /* See CEA-861-E - 5.1 Default Encoding Parameters */ + if (intel_hdmi->has_hdmi_sink && + drm_match_cea_mode(adjusted_mode) > 1) + intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; + else + intel_hdmi->color_range = 0; + } + + if (intel_hdmi->color_range) + pipe_config->limited_color_range = true; + + if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) + pipe_config->has_pch_encoder = true; + + /* + * HDMI is either 12 or 8, so if the display lets 10bpc sneak + * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi + * outputs. + */ + if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) { + DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); + pipe_config->pipe_bpp = 12*3; + } else { + DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); + pipe_config->pipe_bpp = 8*3; + } + + return true; +} + +static enum drm_connector_status +intel_hdmi_detect(struct drm_connector *connector, bool force) +{ + struct drm_device *dev = connector->dev; + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_i915_private *dev_priv = dev->dev_private; + struct edid *edid; + enum drm_connector_status status = connector_status_disconnected; + + intel_hdmi->has_hdmi_sink = false; + intel_hdmi->has_audio = false; + intel_hdmi->rgb_quant_range_selectable = false; + edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); + + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; + if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) + intel_hdmi->has_hdmi_sink = + drm_detect_hdmi_monitor(edid); + intel_hdmi->has_audio = drm_detect_monitor_audio(edid); + intel_hdmi->rgb_quant_range_selectable = + drm_rgb_quant_range_selectable(edid); + } + kfree(edid); + } + + if (status == connector_status_connected) { + if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) + intel_hdmi->has_audio = + (intel_hdmi->force_audio == HDMI_AUDIO_ON); + intel_encoder->type = INTEL_OUTPUT_HDMI; + } + + return status; +} + +static int intel_hdmi_get_modes(struct drm_connector *connector) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + + /* We should parse the EDID data and find out if it's an HDMI sink so + * we can send audio to it. 
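+ * (intel_hdmi_detect() above already does that while probing: it checks the EDID and sets has_hdmi_sink and has_audio, so here we only add the probed modes.)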
+ */ + + return intel_ddc_get_modes(connector, + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); +} + +static bool +intel_hdmi_detect_audio(struct drm_connector *connector) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct edid *edid; + bool has_audio = false; + + edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus)); + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) + has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + } + + return has_audio; +} + +static int +intel_hdmi_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + int ret; + + ret = drm_object_property_set_value(&connector->base, property, val); + if (ret) + return ret; + + if (property == dev_priv->force_audio_property) { + enum hdmi_force_audio i = val; + bool has_audio; + + if (i == intel_hdmi->force_audio) + return 0; + + intel_hdmi->force_audio = i; + + if (i == HDMI_AUDIO_AUTO) + has_audio = intel_hdmi_detect_audio(connector); + else + has_audio = (i == HDMI_AUDIO_ON); + + if (i == HDMI_AUDIO_OFF_DVI) + intel_hdmi->has_hdmi_sink = 0; + + intel_hdmi->has_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + bool old_auto = intel_hdmi->color_range_auto; + uint32_t old_range = intel_hdmi->color_range; + + switch (val) { + case INTEL_BROADCAST_RGB_AUTO: + intel_hdmi->color_range_auto = true; + break; + case INTEL_BROADCAST_RGB_FULL: + intel_hdmi->color_range_auto = false; + intel_hdmi->color_range = 0; + break; + case INTEL_BROADCAST_RGB_LIMITED: + intel_hdmi->color_range_auto = false; + intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; + break; + default: + return -EINVAL; + } + + if (old_auto == intel_hdmi->color_range_auto && + old_range == intel_hdmi->color_range) + return 0; + + goto done; + } + + return -EINVAL; + +done: + if (intel_dig_port->base.base.crtc) + intel_crtc_restore_mode(intel_dig_port->base.base.crtc); + + return 0; +} + +static void intel_hdmi_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { + .mode_set = intel_hdmi_mode_set, +}; + +static const struct drm_connector_funcs intel_hdmi_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_hdmi_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_hdmi_set_property, + .destroy = intel_hdmi_destroy, +}; + +static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { + .get_modes = intel_hdmi_get_modes, + .mode_valid = intel_hdmi_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { + .destroy = intel_encoder_destroy, +}; + +static void +intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) +{ + intel_attach_force_audio_property(connector); + intel_attach_broadcast_rgb_property(connector); + intel_hdmi->color_range_auto = true; +} + +void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector) +{ + struct 
drm_connector *connector = &intel_connector->base; + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + + drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); + + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; + + switch (port) { + case PORT_B: + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; + intel_encoder->hpd_pin = HPD_PORT_B; + break; + case PORT_C: + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; + intel_encoder->hpd_pin = HPD_PORT_C; + break; + case PORT_D: + intel_hdmi->ddc_bus = GMBUS_PORT_DPD; + intel_encoder->hpd_pin = HPD_PORT_D; + break; + case PORT_A: + intel_encoder->hpd_pin = HPD_PORT_A; + /* Internal port only for eDP. */ + default: + BUG(); + } + + if (IS_VALLEYVIEW(dev)) { + intel_hdmi->write_infoframe = vlv_write_infoframe; + intel_hdmi->set_infoframes = vlv_set_infoframes; + } else if (!HAS_PCH_SPLIT(dev)) { + intel_hdmi->write_infoframe = g4x_write_infoframe; + intel_hdmi->set_infoframes = g4x_set_infoframes; + } else if (HAS_DDI(dev)) { + intel_hdmi->write_infoframe = hsw_write_infoframe; + intel_hdmi->set_infoframes = hsw_set_infoframes; + } else if (HAS_PCH_IBX(dev)) { + intel_hdmi->write_infoframe = ibx_write_infoframe; + intel_hdmi->set_infoframes = ibx_set_infoframes; + } else { + intel_hdmi->write_infoframe = cpt_write_infoframe; + intel_hdmi->set_infoframes = cpt_set_infoframes; + } + + if (HAS_DDI(dev)) + intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; + else + intel_connector->get_hw_state = intel_connector_get_hw_state; + + intel_hdmi_add_properties(intel_hdmi, connector); + + intel_connector_attach_encoder(intel_connector, intel_encoder); + drm_sysfs_connector_add(connector); + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. 
+ */ + if (IS_G4X(dev) && !IS_GM45(dev)) { + u32 temp = I915_READ(PEG_BAND_GAP_DATA); + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } +} + +void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) +{ + struct intel_digital_port *intel_dig_port; + struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; + struct intel_connector *intel_connector; + + intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); + if (!intel_dig_port) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_dig_port); + return; + } + + intel_encoder = &intel_dig_port->base; + encoder = &intel_encoder->base; + + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); + + intel_encoder->compute_config = intel_hdmi_compute_config; + intel_encoder->enable = intel_enable_hdmi; + intel_encoder->disable = intel_disable_hdmi; + intel_encoder->get_hw_state = intel_hdmi_get_hw_state; + + intel_encoder->type = INTEL_OUTPUT_HDMI; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->cloneable = false; + + intel_dig_port->port = port; + intel_dig_port->hdmi.hdmi_reg = hdmi_reg; + intel_dig_port->dp.output_reg = 0; + + intel_hdmi_init_connector(intel_dig_port, intel_connector); +} diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c new file mode 100644 index 0000000..639fe19 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -0,0 +1,615 @@ +/* + * Copyright (c) 2006 Dave Airlie + * Copyright © 2006-2008,2010 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + * Chris Wilson + */ +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +struct gmbus_port { + const char *name; + int reg; +}; + +static const struct gmbus_port gmbus_ports[] = { + { "ssc", GPIOB }, + { "vga", GPIOA }, + { "panel", GPIOC }, + { "dpc", GPIOD }, + { "dpb", GPIOE }, + { "dpd", GPIOF }, +}; + +/* Intel GPIO access functions */ + +#define I2C_RISEFALL_TIME 10 + +static inline struct intel_gmbus * +to_intel_gmbus(struct i2c_adapter *i2c) +{ + return container_of(i2c, struct intel_gmbus, adapter); +} + +void +intel_i2c_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); + I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); +} + +static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + /* When using bit bashing for I2C, this bit needs to be set to 1 */ + if (!IS_PINEVIEW(dev_priv->dev)) + return; + + val = I915_READ(DSPCLK_GATE_D); + if (enable) + val |= DPCUNIT_CLOCK_GATE_DISABLE; + else + val &= ~DPCUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); +} + +static u32 get_reserved(struct intel_gmbus *bus) +{ + struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_device *dev = dev_priv->dev; + u32 reserved = 0; + + /* On most chips, these bits must be preserved in software. */ + if (!IS_I830(dev) && !IS_845G(dev)) + reserved = I915_READ_NOTRACE(bus->gpio_reg) & + (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); + + return reserved; +} + +static int get_clock(void *data) +{ + struct intel_gmbus *bus = data; + struct drm_i915_private *dev_priv = bus->dev_priv; + u32 reserved = get_reserved(bus); + I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); + I915_WRITE_NOTRACE(bus->gpio_reg, reserved); + return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; +} + +static int get_data(void *data) +{ + struct intel_gmbus *bus = data; + struct drm_i915_private *dev_priv = bus->dev_priv; + u32 reserved = get_reserved(bus); + I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); + I915_WRITE_NOTRACE(bus->gpio_reg, reserved); + return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; +} + +static void set_clock(void *data, int state_high) +{ + struct intel_gmbus *bus = data; + struct drm_i915_private *dev_priv = bus->dev_priv; + u32 reserved = get_reserved(bus); + u32 clock_bits; + + if (state_high) + clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; + else + clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | + GPIO_CLOCK_VAL_MASK; + + I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits); + POSTING_READ(bus->gpio_reg); +} + +static void set_data(void *data, int state_high) +{ + struct intel_gmbus *bus = data; + struct drm_i915_private *dev_priv = bus->dev_priv; + u32 reserved = get_reserved(bus); + u32 data_bits; + + if (state_high) + data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; + else + data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | + GPIO_DATA_VAL_MASK; + + I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits); + POSTING_READ(bus->gpio_reg); +} + +static int +intel_gpio_pre_xfer(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + intel_i2c_reset(dev_priv->dev); + intel_i2c_quirk_set(dev_priv, true); + set_data(bus, 1); + set_clock(bus, 1); + 
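/* give SDA and SCL time to rise before starting the transfer */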
udelay(I2C_RISEFALL_TIME); + return 0; +} + +static void +intel_gpio_post_xfer(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + set_data(bus, 1); + set_clock(bus, 1); + intel_i2c_quirk_set(dev_priv, false); +} + +static void +intel_gpio_setup(struct intel_gmbus *bus, u32 pin) +{ + struct drm_i915_private *dev_priv = bus->dev_priv; + struct i2c_algo_bit_data *algo; + + algo = &bus->bit_algo; + + /* -1 to map pin pair to gmbus index */ + bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg; + + bus->adapter.algo_data = algo; + algo->setsda = set_data; + algo->setscl = set_clock; + algo->getsda = get_data; + algo->getscl = get_clock; + algo->pre_xfer = intel_gpio_pre_xfer; + algo->post_xfer = intel_gpio_post_xfer; + algo->udelay = I2C_RISEFALL_TIME; + algo->timeout = usecs_to_jiffies(2200); + algo->data = bus; +} + +/* + * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI + * mode. This results in spurious interrupt warnings if the legacy irq no. is + * shared with another device. The kernel then disables that interrupt source + * and so prevents the other device from working properly. + */ +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) +static int +gmbus_wait_hw_status(struct drm_i915_private *dev_priv, + u32 gmbus2_status, + u32 gmbus4_irq_en) +{ + int i; + int reg_offset = dev_priv->gpio_mmio_base; + u32 gmbus2 = 0; + DEFINE_WAIT(wait); + + if (!HAS_GMBUS_IRQ(dev_priv->dev)) + gmbus4_irq_en = 0; + + /* Important: The hw handles only the first bit, so set only one! Since + * we also need to check for NAKs besides the hw ready/idle signal, we + * need to wake up periodically and check that ourselves. */ + I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); + + for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { + prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + + gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset); + if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) + break; + + schedule_timeout(1); + } + finish_wait(&dev_priv->gmbus_wait_queue, &wait); + + I915_WRITE(GMBUS4 + reg_offset, 0); + + if (gmbus2 & GMBUS_SATOER) + return -ENXIO; + if (gmbus2 & gmbus2_status) + return 0; + return -ETIMEDOUT; +} + +static int +gmbus_wait_idle(struct drm_i915_private *dev_priv) +{ + int ret; + int reg_offset = dev_priv->gpio_mmio_base; + +#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0) + + if (!HAS_GMBUS_IRQ(dev_priv->dev)) + return wait_for(C, 10); + + /* Important: The hw handles only the first bit, so set only one! 
*/ + I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); + + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); + + I915_WRITE(GMBUS4 + reg_offset, 0); + + if (ret) + return 0; + else + return -ETIMEDOUT; +#undef C +} + +static int +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + + I915_WRITE(GMBUS1 + reg_offset, + gmbus1_index | + GMBUS_CYCLE_WAIT | + (len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + while (len) { + int ret; + u32 val, loop = 0; + + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, + GMBUS_HW_RDY_EN); + if (ret) + return ret; + + val = I915_READ(GMBUS3 + reg_offset); + do { + *buf++ = val & 0xff; + val >>= 8; + } while (--len && ++loop < 4); + } + + return 0; +} + +static int +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u16 len = msg->len; + u8 *buf = msg->buf; + u32 val, loop; + + val = loop = 0; + while (len && loop < 4) { + val |= *buf++ << (8 * loop++); + len -= 1; + } + + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | + (msg->len << GMBUS_BYTE_COUNT_SHIFT) | + (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + while (len) { + int ret; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, + GMBUS_HW_RDY_EN); + if (ret) + return ret; + } + return 0; +} + +/* + * The gmbus controller can combine a 1 or 2 byte write with a read that + * immediately follows it by using an "INDEX" cycle. 
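+ * A DDC EDID read is the typical case: a one or two byte offset write immediately followed by a read, which is exactly the message pattern gmbus_is_index_read() below checks for.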
+ */ +static bool +gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) +{ + return (i + 1 < num && + !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + (msgs[i + 1].flags & I2C_M_RD)); +} + +static int +gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +{ + int reg_offset = dev_priv->gpio_mmio_base; + u32 gmbus1_index = 0; + u32 gmbus5 = 0; + int ret; + + if (msgs[0].len == 2) + gmbus5 = GMBUS_2BYTE_INDEX_EN | + msgs[0].buf[1] | (msgs[0].buf[0] << 8); + if (msgs[0].len == 1) + gmbus1_index = GMBUS_CYCLE_INDEX | + (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT); + + /* GMBUS5 holds 16-bit index */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, gmbus5); + + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + + /* Clear GMBUS5 after each index transfer */ + if (gmbus5) + I915_WRITE(GMBUS5 + reg_offset, 0); + + return ret; +} + +static int +gmbus_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + int i, reg_offset; + int ret = 0; + + mutex_lock(&dev_priv->gmbus_mutex); + + if (bus->force_bit) { + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); + goto out; + } + + reg_offset = dev_priv->gpio_mmio_base; + + I915_WRITE(GMBUS0 + reg_offset, bus->reg0); + + for (i = 0; i < num; i++) { + if (gmbus_is_index_read(msgs, i, num)) { + ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); + i += 1; /* set i to the index of the read xfer */ + } else if (msgs[i].flags & I2C_M_RD) { + ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); + } else { + ret = gmbus_xfer_write(dev_priv, &msgs[i]); + } + + if (ret == -ETIMEDOUT) + goto timeout; + if (ret == -ENXIO) + goto clear_err; + + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE, + GMBUS_HW_WAIT_EN); + if (ret == -ENXIO) + goto clear_err; + if (ret) + goto timeout; + } + + /* Generate a STOP condition on the bus. Note that gmbus can't generate + * a STOP on the very first cycle. To simplify the code we + * unconditionally generate the STOP condition with an additional gmbus + * cycle. */ + I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + + /* Mark the GMBUS interface as disabled after waiting for idle. + * We will re-enable it at the start of the next xfer, + * till then let it sleep. + */ + if (gmbus_wait_idle(dev_priv)) { + DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", + adapter->name); + ret = -ETIMEDOUT; + } + I915_WRITE(GMBUS0 + reg_offset, 0); + ret = ret ?: i; + goto out; + +clear_err: + /* + * Wait for bus to IDLE before clearing NAK. + * If we clear the NAK while bus is still active, then it will stay + * active and the next transaction may fail. + * + * If no ACK is received during the address phase of a transaction, the + * adapter must report -ENXIO. It is not clear what to return if no ACK + * is received at other times. But we have to be careful to not return + * spurious -ENXIO because that will prevent i2c and drm edid functions + * from retrying. So return -ENXIO only when gmbus properly quiesces - + * timing out seems to happen when there _is_ a ddc chip present, but + * it's slow responding and only answers on the 2nd retry. + */ + ret = -ENXIO; + if (gmbus_wait_idle(dev_priv)) { + DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", + adapter->name); + ret = -ETIMEDOUT; + } + + /* Toggle the Software Clear Interrupt bit.
This has the effect + * of resetting the GMBUS controller and so clearing the + * BUS_ERROR raised by the slave's NAK. + */ + I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); + I915_WRITE(GMBUS1 + reg_offset, 0); + I915_WRITE(GMBUS0 + reg_offset, 0); + + DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", + adapter->name, msgs[i].addr, + (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + + goto out; + +timeout: + DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", + bus->adapter.name, bus->reg0 & 0xff); + I915_WRITE(GMBUS0 + reg_offset, 0); + + /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ + bus->force_bit = 1; + ret = i2c_bit_algo.master_xfer(adapter, msgs, num); + +out: + mutex_unlock(&dev_priv->gmbus_mutex); + return ret; +} + +static u32 gmbus_func(struct i2c_adapter *adapter) +{ + return i2c_bit_algo.functionality(adapter) & + (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + /* I2C_FUNC_10BIT_ADDR | */ + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL); +} + +static const struct i2c_algorithm gmbus_algorithm = { + .master_xfer = gmbus_xfer, + .functionality = gmbus_func +}; + +/** + * intel_gmbus_setup - instantiate all Intel i2c GMBuses + * @dev: DRM device + */ +int intel_setup_gmbus(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i; + + if (HAS_PCH_NOP(dev)) + return 0; + else if (HAS_PCH_SPLIT(dev)) + dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; + else if (IS_VALLEYVIEW(dev)) + dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; + else + dev_priv->gpio_mmio_base = 0; + + mutex_init(&dev_priv->gmbus_mutex); + init_waitqueue_head(&dev_priv->gmbus_wait_queue); + + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + u32 port = i + 1; /* +1 to map gmbus index to pin pair */ + + bus->adapter.owner = THIS_MODULE; + bus->adapter.class = I2C_CLASS_DDC; + snprintf(bus->adapter.name, + sizeof(bus->adapter.name), + "i915 gmbus %s", + gmbus_ports[i].name); + + bus->adapter.dev.parent = &dev->pdev->dev; + bus->dev_priv = dev_priv; + + bus->adapter.algo = &gmbus_algorithm; + + /* By default use a conservative clock rate */ + bus->reg0 = port | GMBUS_RATE_100KHZ; + + /* gmbus seems to be broken on i830 */ + if (IS_I830(dev)) + bus->force_bit = 1; + + intel_gpio_setup(bus, port); + + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; + } + + intel_i2c_reset(dev_priv->dev); + + return 0; + +err: + while (--i) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + i2c_del_adapter(&bus->adapter); + } + return ret; +} + +struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, + unsigned port) +{ + WARN_ON(!intel_gmbus_is_port_valid(port)); + /* -1 to map pin pair to gmbus index */ + return (intel_gmbus_is_port_valid(port)) ? + &dev_priv->gmbus[port - 1].adapter : NULL; +} + +void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; +} + +void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + bus->force_bit += force_bit ? 1 : -1; + DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", + force_bit ? 
"en" : "dis", adapter->name, + bus->force_bit); +} + +void intel_teardown_gmbus(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + i2c_del_adapter(&bus->adapter); + } +} diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c new file mode 100644 index 0000000..29412cc --- /dev/null +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -0,0 +1,1291 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * Copyright (c) 2006 Dave Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Dave Airlie + * Jesse Barnes + */ + +#include +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" +#include + +/* Private structure for the integrated LVDS support */ +struct intel_lvds_connector { + struct intel_connector base; + + struct notifier_block lid_notifier; +}; + +struct intel_lvds_encoder { + struct intel_encoder base; + + u32 pfit_control; + u32 pfit_pgm_ratios; + bool is_dual_link; + u32 reg; + + struct intel_lvds_connector *attached_connector; +}; + +static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_lvds_encoder, base.base); +} + +static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) +{ + return container_of(connector, struct intel_lvds_connector, base.base); +} + +static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + u32 tmp; + + tmp = I915_READ(lvds_encoder->reg); + + if (!(tmp & LVDS_PORT_EN)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +/* The LVDS pin pair needs to be on before the DPLLs are enabled. + * This is an exception to the general rule that mode_set doesn't turn + * things on. 
+ */ +static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) +{ + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *fixed_mode = + lvds_encoder->attached_connector->base.panel.fixed_mode; + int pipe = intel_crtc->pipe; + u32 temp; + + temp = I915_READ(lvds_encoder->reg); + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + + if (HAS_PCH_CPT(dev)) { + temp &= ~PORT_TRANS_SEL_MASK; + temp |= PORT_TRANS_SEL_CPT(pipe); + } else { + if (pipe == 1) { + temp |= LVDS_PIPEB_SELECT; + } else { + temp &= ~LVDS_PIPEB_SELECT; + } + } + + /* set the corresponding LVDS_BORDER bit */ + temp |= dev_priv->lvds_border_bits; + /* Set the B0-B3 data pairs corresponding to whether we're going to + * set the DPLLs for dual-channel mode or not. + */ + if (lvds_encoder->is_dual_link) + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + else + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) + * appropriately here, but we need to look more thoroughly into how + * panels behave in the two modes. + */ + + /* Set the dithering flag on LVDS as needed, note that there is no + * special lvds dither control bit on pch-split platforms, dithering is + * only controlled through the PIPECONF reg. */ + if (INTEL_INFO(dev)->gen == 4) { + if (dev_priv->lvds_dither) + temp |= LVDS_ENABLE_DITHER; + else + temp &= ~LVDS_ENABLE_DITHER; + } + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); + if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) + temp |= LVDS_HSYNC_POLARITY; + if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) + temp |= LVDS_VSYNC_POLARITY; + + I915_WRITE(lvds_encoder->reg, temp); +} + +static void intel_pre_enable_lvds(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base); + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_PCH_SPLIT(dev) || !enc->pfit_control) + return; + + /* + * Enable automatic panel scaling so that non-native modes + * fill the screen. The panel fitter should only be + * adjusted whilst the pipe is disabled, according to + * register description and PRM. + */ + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", + enc->pfit_control, + enc->pfit_pgm_ratios); + + I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, enc->pfit_control); +} + +/** + * Sets the power state for the panel.
+ */ +static void intel_enable_lvds(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 ctl_reg, stat_reg; + + if (HAS_PCH_SPLIT(dev)) { + ctl_reg = PCH_PP_CONTROL; + stat_reg = PCH_PP_STATUS; + } else { + ctl_reg = PP_CONTROL; + stat_reg = PP_STATUS; + } + + I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); + + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + POSTING_READ(lvds_encoder->reg); + if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) + DRM_ERROR("timed out waiting for panel to power on\n"); + + intel_panel_enable_backlight(dev, intel_crtc->pipe); +} + +static void intel_disable_lvds(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct drm_i915_private *dev_priv = dev->dev_private; + u32 ctl_reg, stat_reg; + + if (HAS_PCH_SPLIT(dev)) { + ctl_reg = PCH_PP_CONTROL; + stat_reg = PCH_PP_STATUS; + } else { + ctl_reg = PP_CONTROL; + stat_reg = PP_STATUS; + } + + intel_panel_disable_backlight(dev); + + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + + I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); + POSTING_READ(lvds_encoder->reg); +} + +static int intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + + return MODE_OK; +} + +static void +centre_horizontally(struct drm_display_mode *mode, + int width) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the hsync and hblank widths constant */ + sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; + blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->hdisplay - width + 1) / 2; + border += border & 1; /* make the border even */ + + mode->crtc_hdisplay = width; + mode->crtc_hblank_start = width + border; + mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; + + mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; + mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; +} + +static void +centre_vertically(struct drm_display_mode *mode, + int height) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the vsync and vblank widths constant */ + sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; + blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->vdisplay - height + 1) / 2; + + mode->crtc_vdisplay = height; + mode->crtc_vblank_start = height + border; + mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; + + mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; + mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; +} + +static inline u32 panel_fitter_scaling(u32 source, u32 target) +{ + /* + * Floating point operation is not supported. 
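(The source/target scaling ratio is usually fractional, e.g. 1024/1280 = 0.8; in .12 fixed point it is stored as 1024 * 4096 / 1280 = 3276.)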
So the FACTOR + * is defined, which can avoid the floating point computation + * when calculating the panel ratio. + */ +#define ACCURACY 12 +#define FACTOR (1 << ACCURACY) + u32 ratio = source * FACTOR / target; + return (FACTOR * ratio + FACTOR/2) / FACTOR; +} + +static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds_encoder *lvds_encoder = + to_lvds_encoder(&intel_encoder->base); + struct intel_connector *intel_connector = + &lvds_encoder->attached_connector->base; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct drm_display_mode *mode = &pipe_config->requested_mode; + struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; + u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; + unsigned int lvds_bpp; + int pipe; + + /* Should never happen!! */ + if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { + DRM_ERROR("Can't support LVDS on pipe A\n"); + return false; + } + + if (intel_encoder_check_is_cloned(&lvds_encoder->base)) + return false; + + if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == + LVDS_A3_POWER_UP) + lvds_bpp = 8*3; + else + lvds_bpp = 6*3; + + if (lvds_bpp != pipe_config->pipe_bpp) { + DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n", + pipe_config->pipe_bpp, lvds_bpp); + pipe_config->pipe_bpp = lvds_bpp; + } + /* + * We have timings from the BIOS for the panel, put them in + * to the adjusted mode. The CRTC will be set up for this mode, + * with the panel scaling set up to source from the H/VDisplay + * of the original mode. + */ + intel_fixed_panel_mode(intel_connector->panel.fixed_mode, + adjusted_mode); + + if (HAS_PCH_SPLIT(dev)) { + pipe_config->has_pch_encoder = true; + + intel_pch_panel_fitting(dev, + intel_connector->panel.fitting_mode, + mode, adjusted_mode); + return true; + } + + /* Native modes don't need fitting */ + if (adjusted_mode->hdisplay == mode->hdisplay && + adjusted_mode->vdisplay == mode->vdisplay) + goto out; + + /* 965+ wants fuzzy fitting */ + if (INTEL_INFO(dev)->gen >= 4) + pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | + PFIT_FILTER_FUZZY); + + /* + * Enable automatic panel scaling for non-native modes so that they fill + * the screen. Should be enabled before the pipe is enabled, according + * to register description and PRM. + * Change the value here to see the borders for debugging + */ + for_each_pipe(pipe) + I915_WRITE(BCLRPAT(pipe), 0); + + drm_mode_set_crtcinfo(adjusted_mode, 0); + pipe_config->timings_set = true; + + switch (intel_connector->panel.fitting_mode) { + case DRM_MODE_SCALE_CENTER: + /* + * For centered modes, we have to calculate border widths & + * heights and modify the values programmed into the CRTC. 
+ */ + centre_horizontally(adjusted_mode, mode->hdisplay); + centre_vertically(adjusted_mode, mode->vdisplay); + border = LVDS_BORDER_ENABLE; + break; + + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + if (INTEL_INFO(dev)->gen >= 4) { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + + /* 965+ is easy, it does everything in hw */ + if (scaled_width > scaled_height) + pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; + else if (scaled_width < scaled_height) + pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; + else if (adjusted_mode->hdisplay != mode->hdisplay) + pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; + } else { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + /* + * For earlier chips we have to calculate the scaling + * ratio by hand and program it into the + * PFIT_PGM_RATIO register + */ + if (scaled_width > scaled_height) { /* pillar */ + centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->vdisplay != adjusted_mode->vdisplay) { + u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else if (scaled_width < scaled_height) { /* letter */ + centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->hdisplay != adjusted_mode->hdisplay) { + u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else + /* Aspects match, Let hw scale both directions */ + pfit_control |= (PFIT_ENABLE | + VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + break; + + case DRM_MODE_SCALE_FULLSCREEN: + /* + * Full scaling, even if it changes the aspect ratio. + * Fortunately this is all done for us in hw. + */ + if (mode->vdisplay != adjusted_mode->vdisplay || + mode->hdisplay != adjusted_mode->hdisplay) { + pfit_control |= PFIT_ENABLE; + if (INTEL_INFO(dev)->gen >= 4) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (VERT_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_AUTO_SCALE | + HORIZ_INTERP_BILINEAR); + } + break; + + default: + break; + } + +out: + /* If not enabling scaling, be consistent and always use 0. */ + if ((pfit_control & PFIT_ENABLE) == 0) { + pfit_control = 0; + pfit_pgm_ratios = 0; + } + + /* Make sure pre-965 set dither correctly */ + if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + + if (pfit_control != lvds_encoder->pfit_control || + pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { + lvds_encoder->pfit_control = pfit_control; + lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; + } + dev_priv->lvds_border_bits = border; + + /* + * XXX: It would be nice to support lower refresh rates on the + * panels to reduce power consumption, and perhaps match the + * user's requested refresh rate. 
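+ * (intel_find_lvds_downclock() below goes part of the way by looking for a lower-clock duplicate of the fixed mode in the EDID.)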
+ */ + + return true; +} + +static void intel_lvds_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* + * The LVDS pin pair will already have been turned on in the + * intel_crtc_mode_set since it has a large impact on the DPLL + * settings. + */ +} + +/** + * Detect the LVDS connection. + * + * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means + * connected and closed means disconnected. We also send hotplug events as + * needed, using lid status notification from the input layer. + */ +static enum drm_connector_status +intel_lvds_detect(struct drm_connector *connector, bool force) +{ + struct drm_device *dev = connector->dev; + enum drm_connector_status status; + + status = intel_panel_detect(dev); + if (status != connector_status_unknown) + return status; + + return connector_status_connected; +} + +/** + * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. + */ +static int intel_lvds_get_modes(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode; + + /* use cached edid if we have one */ + if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) + return drm_add_edid_modes(connector, lvds_connector->base.edid); + + mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); + if (mode == NULL) + return 0; + + drm_mode_probed_add(connector, mode); + return 1; +} + +static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Skipping forced modeset for %s\n", id->ident); + return 1; +} + +/* The GPU hangs up on these systems if modeset is performed on LID open */ +static const struct dmi_system_id intel_no_modeset_on_lid[] = { + { + .callback = intel_no_modeset_on_lid_dmi_callback, + .ident = "Toshiba Tecra A11", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), + }, + }, + + { } /* terminating entry */ +}; + +/* + * Lid events. Note the use of 'modeset': + * - we set it to MODESET_ON_LID_OPEN on lid close, + * and set it to MODESET_DONE on open + * - we use it as an "only once" bit (i.e. we ignore + * duplicate events where it was already properly set) + * - the suspend/resume paths will set it to + * MODESET_SUSPENDED and ignore the lid open event, + * because they restore the mode ("lid open"). + */ +static int intel_lid_notify(struct notifier_block *nb, unsigned long val, + void *unused) +{ + struct intel_lvds_connector *lvds_connector = + container_of(nb, struct intel_lvds_connector, lid_notifier); + struct drm_connector *connector = &lvds_connector->base.base; + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + return NOTIFY_OK; + + mutex_lock(&dev_priv->modeset_restore_lock); + if (dev_priv->modeset_restore == MODESET_SUSPENDED) + goto exit; + /* + * check and update the status of LVDS connector after receiving + * the LID notification event.
+ */ + connector->status = connector->funcs->detect(connector, false); + + /* Don't force modeset on machines where it causes a GPU lockup */ + if (dmi_check_system(intel_no_modeset_on_lid)) + goto exit; + if (!acpi_lid_open()) { + /* do modeset on next lid open event */ + dev_priv->modeset_restore = MODESET_ON_LID_OPEN; + goto exit; + } + + if (dev_priv->modeset_restore == MODESET_DONE) + goto exit; + + drm_modeset_lock_all(dev); + intel_modeset_setup_hw_state(dev, true); + drm_modeset_unlock_all(dev); + + dev_priv->modeset_restore = MODESET_DONE; + +exit: + mutex_unlock(&dev_priv->modeset_restore_lock); + return NOTIFY_OK; +} + +/** + * intel_lvds_destroy - unregister and free LVDS structures + * @connector: connector to free + * + * Unregister the DDC bus for this connector then free the driver private + * structure. + */ +static void intel_lvds_destroy(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds_connector = + to_lvds_connector(connector); + + if (lvds_connector->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); + + if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) + kfree(lvds_connector->base.edid); + + intel_panel_fini(&lvds_connector->base.panel); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static int intel_lvds_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; + + if (property == dev->mode_config.scaling_mode_property) { + struct drm_crtc *crtc; + + if (value == DRM_MODE_SCALE_NONE) { + DRM_DEBUG_KMS("no scaling not supported\n"); + return -EINVAL; + } + + if (intel_connector->panel.fitting_mode == value) { + /* the LVDS scaling property is not changed */ + return 0; + } + intel_connector->panel.fitting_mode = value; + + crtc = intel_attached_encoder(connector)->base.crtc; + if (crtc && crtc->enabled) { + /* + * If the CRTC is enabled, the display will be changed + * according to the new panel fitting mode. 
+ */ + intel_crtc_restore_mode(crtc); + } + } + + return 0; +} + +static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { + .mode_set = intel_lvds_mode_set, +}; + +static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { + .get_modes = intel_lvds_get_modes, + .mode_valid = intel_lvds_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static const struct drm_connector_funcs intel_lvds_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_lvds_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_lvds_set_property, + .destroy = intel_lvds_destroy, +}; + +static const struct drm_encoder_funcs intel_lvds_enc_funcs = { + .destroy = intel_encoder_destroy, +}; + +static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); + return 1; +} + +/* These systems claim to have LVDS, but really don't */ +static const struct dmi_system_id intel_no_lvds[] = { + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Apple Mac Mini (Core 2 series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI IM-945GSE-A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MSI"), + DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Dell Studio Hybrid", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Dell OptiPlex FX170", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen Mini PC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), + DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen Mini PC MP915", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i915GMm-HFS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i45GMx-I", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Aopen i945GTt-VFA", + .matches = { + DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron U800", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + DMI_MATCH(DMI_PRODUCT_NAME, "U800"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron E830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + DMI_MATCH(DMI_PRODUCT_NAME, "E830"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Asus EeeBox PC EB1007", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Asus AT5NM10T-I", + .matches = { 
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Hewlett-Packard HP t5740", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, " t5740"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Hewlett-Packard t5745", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Hewlett-Packard st5747", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI Wind Box DC500", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Supermicro X7SPA-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Fujitsu Esprimo Q900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), + }, + }, + + { } /* terminating entry */ +}; + +/** + * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID + * @dev: drm device + * @connector: LVDS connector + * + * Find the reduced downclock for LVDS in EDID. + */ +static void intel_find_lvds_downclock(struct drm_device *dev, + struct drm_display_mode *fixed_mode, + struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *scan; + int temp_downclock; + + temp_downclock = fixed_mode->clock; + list_for_each_entry(scan, &connector->probed_modes, head) { + /* + * If one mode has the same resolution with the fixed_panel + * mode while they have the different refresh rate, it means + * that the reduced downclock is found for the LVDS. In such + * case we can set the different FPx0/1 to dynamically select + * between low and high frequency. + */ + if (scan->hdisplay == fixed_mode->hdisplay && + scan->hsync_start == fixed_mode->hsync_start && + scan->hsync_end == fixed_mode->hsync_end && + scan->htotal == fixed_mode->htotal && + scan->vdisplay == fixed_mode->vdisplay && + scan->vsync_start == fixed_mode->vsync_start && + scan->vsync_end == fixed_mode->vsync_end && + scan->vtotal == fixed_mode->vtotal) { + if (scan->clock < temp_downclock) { + /* + * The downclock is already found. But we + * expect to find the lower downclock. + */ + temp_downclock = scan->clock; + } + } + } + if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { + /* We found the downclock for LVDS. */ + dev_priv->lvds_downclock_avail = 1; + dev_priv->lvds_downclock = temp_downclock; + DRM_DEBUG_KMS("LVDS downclock is found in EDID. " + "Normal clock %dKhz, downclock %dKhz\n", + fixed_mode->clock, temp_downclock); + } +} + +/* + * Enumerate the child dev array parsed from VBT to check whether + * the LVDS is present. + * If it is present, return 1. + * If it is not present, return false. + * If no child dev is parsed from VBT, it assumes that the LVDS is present. 
+ */ +static bool lvds_is_present_in_vbt(struct drm_device *dev, + u8 *i2c_pin) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (!dev_priv->child_dev_num) + return true; + + for (i = 0; i < dev_priv->child_dev_num; i++) { + struct child_device_config *child = dev_priv->child_dev + i; + + /* If the device type is not LFP, continue. + * We have to check both the new identifiers as well as the + * old for compatibility with some BIOSes. + */ + if (child->device_type != DEVICE_TYPE_INT_LFP && + child->device_type != DEVICE_TYPE_LFP) + continue; + + if (intel_gmbus_is_port_valid(child->i2c_pin)) + *i2c_pin = child->i2c_pin; + + /* However, we cannot trust the BIOS writers to populate + * the VBT correctly. Since LVDS requires additional + * information from AIM blocks, a non-zero addin offset is + * a good indicator that the LVDS is actually present. + */ + if (child->addin_offset) + return true; + + /* But even then some BIOS writers perform some black magic + * and instantiate the device without reference to any + * additional data. Trust that if the VBT was written into + * the OpRegion then they have validated the LVDS's existence. + */ + if (dev_priv->opregion.vbt) + return true; + } + + return false; +} + +static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) +{ + DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_dual_link_lvds[] = { + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), + }, + }, + { } /* terminating entry */ +}; + +bool intel_is_dual_link_lvds(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_lvds_encoder *lvds_encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->type == INTEL_OUTPUT_LVDS) { + lvds_encoder = to_lvds_encoder(&encoder->base); + + return lvds_encoder->is_dual_link; + } + } + + return false; +} + +static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) +{ + struct drm_device *dev = lvds_encoder->base.base.dev; + unsigned int val; + struct drm_i915_private *dev_priv = dev->dev_private; + + /* use the module option value if specified */ + if (i915_lvds_channel_mode > 0) + return i915_lvds_channel_mode == 2; + + if (dmi_check_system(intel_dual_link_lvds)) + return true; + + /* BIOS should set the proper LVDS register value at boot, but + * in reality, it doesn't set the value when the lid is closed; + * we need to check "the value to be set" in VBT when LVDS + * register is uninitialized. + */ + val = I915_READ(lvds_encoder->reg); + if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) + val = dev_priv->bios_lvds_val; + + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; +} + +static bool intel_lvds_supported(struct drm_device *dev) +{ + /* With the introduction of the PCH we gained a dedicated + * LVDS presence pin, use it. 
*/ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + return true; + + /* Otherwise LVDS was only attached to mobile products, + * except for the inglorious 830gm */ + if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) + return true; + + return false; +} + +/** + * intel_lvds_init - setup LVDS connectors on this device + * @dev: drm device + * + * Create the connector, register the LVDS DDC bus, and try to figure out what + * modes we can display on the LVDS panel (if present). + */ +bool intel_lvds_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds_encoder *lvds_encoder; + struct intel_encoder *intel_encoder; + struct intel_lvds_connector *lvds_connector; + struct intel_connector *intel_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *scan; /* *modes, *bios_mode; */ + struct drm_display_mode *fixed_mode = NULL; + struct edid *edid; + struct drm_crtc *crtc; + u32 lvds; + int pipe; + u8 pin; + + if (!intel_lvds_supported(dev)) + return false; + + /* Skip init on machines we know falsely report LVDS */ + if (dmi_check_system(intel_no_lvds)) + return false; + + pin = GMBUS_PORT_PANEL; + if (!lvds_is_present_in_vbt(dev, &pin)) { + DRM_DEBUG_KMS("LVDS is not present in VBT\n"); + return false; + } + + if (HAS_PCH_SPLIT(dev)) { + if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) + return false; + if (dev_priv->edp.support) { + DRM_DEBUG_KMS("disable LVDS for eDP support\n"); + return false; + } + } + + lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); + if (!lvds_encoder) + return false; + + lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); + if (!lvds_connector) { + kfree(lvds_encoder); + return false; + } + + lvds_encoder->attached_connector = lvds_connector; + + if (!HAS_PCH_SPLIT(dev)) { + lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); + } + + intel_encoder = &lvds_encoder->base; + encoder = &intel_encoder->base; + intel_connector = &lvds_connector->base; + connector = &intel_connector->base; + drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + + drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS); + + intel_encoder->enable = intel_enable_lvds; + intel_encoder->pre_enable = intel_pre_enable_lvds; + intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; + intel_encoder->compute_config = intel_lvds_compute_config; + intel_encoder->disable = intel_disable_lvds; + intel_encoder->get_hw_state = intel_lvds_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + + intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_encoder->type = INTEL_OUTPUT_LVDS; + + intel_encoder->cloneable = false; + if (HAS_PCH_SPLIT(dev)) + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + else if (IS_GEN4(dev)) + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + else + intel_encoder->crtc_mask = (1 << 1); + + drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); + drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + if (HAS_PCH_SPLIT(dev)) { + lvds_encoder->reg = PCH_LVDS; + } else { + lvds_encoder->reg = LVDS; + } + + /* create the scaling mode property */ + drm_mode_create_scaling_mode_property(dev); + 
drm_object_attach_property(&connector->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_ASPECT); + intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + /* + * LVDS discovery: + * 1) check for EDID on DDC + * 2) check for VBT data + * 3) check to see if LVDS is already on + * if none of the above, no panel + * 4) make sure lid is open + * if closed, act like it's not there for now + */ + + /* + * Attempt to get the fixed panel mode from DDC. Assume that the + * preferred mode is the right one. + */ + edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); + if (edid) { + if (drm_add_edid_modes(connector, edid)) { + drm_mode_connector_update_edid_property(connector, + edid); + } else { + kfree(edid); + edid = ERR_PTR(-EINVAL); + } + } else { + edid = ERR_PTR(-ENOENT); + } + lvds_connector->base.edid = edid; + + if (IS_ERR_OR_NULL(edid)) { + /* Didn't get an EDID, so + * Set wide sync ranges so we get all modes + * handed to valid_mode for checking + */ + connector->display_info.min_vfreq = 0; + connector->display_info.max_vfreq = 200; + connector->display_info.min_hfreq = 0; + connector->display_info.max_hfreq = 200; + } + + list_for_each_entry(scan, &connector->probed_modes, head) { + if (scan->type & DRM_MODE_TYPE_PREFERRED) { + DRM_DEBUG_KMS("using preferred mode from EDID: "); + drm_mode_debug_printmodeline(scan); + + fixed_mode = drm_mode_duplicate(dev, scan); + if (fixed_mode) { + intel_find_lvds_downclock(dev, fixed_mode, + connector); + goto out; + } + } + } + + /* Failed to get EDID, what about VBT? */ + if (dev_priv->lfp_lvds_vbt_mode) { + DRM_DEBUG_KMS("using mode from VBT: "); + drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); + + fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (fixed_mode) { + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + goto out; + } + } + + /* + * If we didn't get EDID, try checking if the panel is already turned + * on. If so, assume that whatever is currently programmed is the + * correct mode. + */ + + /* Ironlake: FIXME if still fail, not try pipe mode now */ + if (HAS_PCH_SPLIT(dev)) + goto failed; + + lvds = I915_READ(LVDS); + pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; + crtc = intel_get_crtc_for_pipe(dev, pipe); + + if (crtc && (lvds & LVDS_PORT_EN)) { + fixed_mode = intel_crtc_mode_get(dev, crtc); + if (fixed_mode) { + DRM_DEBUG_KMS("using current (BIOS) mode: "); + drm_mode_debug_printmodeline(fixed_mode); + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + goto out; + } + } + + /* If we still don't have a mode after all that, give up. */ + if (!fixed_mode) + goto failed; + +out: + lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); + DRM_DEBUG_KMS("detected %s-link lvds configuration\n", + lvds_encoder->is_dual_link ? 
"dual" : "single"); + + /* + * Unlock registers and just + * leave them unlocked + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); + } + lvds_connector->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { + DRM_DEBUG_KMS("lid notifier registration failed\n"); + lvds_connector->lid_notifier.notifier_call = NULL; + } + drm_sysfs_connector_add(connector); + + intel_panel_init(&intel_connector->panel, fixed_mode); + intel_panel_setup_backlight(connector); + + return true; + +failed: + DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); + drm_connector_cleanup(connector); + drm_encoder_cleanup(encoder); + if (fixed_mode) + drm_mode_destroy(dev, fixed_mode); + kfree(lvds_encoder); + kfree(lvds_connector); + return false; +} diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c new file mode 100644 index 0000000..0e860f3 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2007 Dave Airlie + * Copyright (c) 2007, 2010 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include "i915_drv.h" + +/** + * intel_connector_update_modes - update connector from edid + * @connector: DRM connector device to use + * @edid: previously read EDID information + */ +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int ret; + + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + + return ret; +} + +/** + * intel_ddc_get_modes - get modelist from monitor + * @connector: DRM connector device to use + * @adapter: i2c adapter + * + * Fetch the EDID information from @connector using the DDC bus. 
+ */ +int intel_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + int ret; + + edid = drm_get_edid(connector, adapter); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static const struct drm_prop_enum_list force_audio_names[] = { + { HDMI_AUDIO_OFF_DVI, "force-dvi" }, + { HDMI_AUDIO_OFF, "off" }, + { HDMI_AUDIO_AUTO, "auto" }, + { HDMI_AUDIO_ON, "on" }, +}; + +void +intel_attach_force_audio_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_property *prop; + + prop = dev_priv->force_audio_property; + if (prop == NULL) { + prop = drm_property_create_enum(dev, 0, + "audio", + force_audio_names, + ARRAY_SIZE(force_audio_names)); + if (prop == NULL) + return; + + dev_priv->force_audio_property = prop; + } + drm_object_attach_property(&connector->base, prop, 0); +} + +static const struct drm_prop_enum_list broadcast_rgb_names[] = { + { INTEL_BROADCAST_RGB_AUTO, "Automatic" }, + { INTEL_BROADCAST_RGB_FULL, "Full" }, + { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" }, +}; + +void +intel_attach_broadcast_rgb_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_property *prop; + + prop = dev_priv->broadcast_rgb_property; + if (prop == NULL) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + "Broadcast RGB", + broadcast_rgb_names, + ARRAY_SIZE(broadcast_rgb_names)); + if (prop == NULL) + return; + + dev_priv->broadcast_rgb_property = prop; + } + + drm_object_attach_property(&connector->base, prop, 0); +} diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c new file mode 100644 index 0000000..841d2f4 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -0,0 +1,559 @@ +/* + * Copyright 2008 Intel Corporation + * Copyright 2008 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include +#include +#include "i915_drv.h" +#include "intel_drv.h" + +#define PCI_ASLE 0xe4 +#define PCI_ASLS 0xfc + +#define OPREGION_HEADER_OFFSET 0 +#define OPREGION_ACPI_OFFSET 0x100 +#define ACPI_CLID 0x01ac /* current lid state indicator */ +#define ACPI_CDCK 0x01b0 /* current docking state indicator */ +#define OPREGION_SWSCI_OFFSET 0x200 +#define OPREGION_ASLE_OFFSET 0x300 +#define OPREGION_VBT_OFFSET 0x400 + +#define OPREGION_SIGNATURE "IntelGraphicsMem" +#define MBOX_ACPI (1<<0) +#define MBOX_SWSCI (1<<1) +#define MBOX_ASLE (1<<2) + +struct opregion_header { + u8 signature[16]; + u32 size; + u32 opregion_ver; + u8 bios_ver[32]; + u8 vbios_ver[16]; + u8 driver_ver[16]; + u32 mboxes; + u8 reserved[164]; +} __attribute__((packed)); + +/* OpRegion mailbox #1: public ACPI methods */ +struct opregion_acpi { + u32 drdy; /* driver readiness */ + u32 csts; /* notification status */ + u32 cevt; /* current event */ + u8 rsvd1[20]; + u32 didl[8]; /* supported display devices ID list */ + u32 cpdl[8]; /* currently presented display list */ + u32 cadl[8]; /* currently active display list */ + u32 nadl[8]; /* next active devices list */ + u32 aslp; /* ASL sleep time-out */ + u32 tidx; /* toggle table index */ + u32 chpd; /* current hotplug enable indicator */ + u32 clid; /* current lid state*/ + u32 cdck; /* current docking state */ + u32 sxsw; /* Sx state resume */ + u32 evts; /* ASL supported events */ + u32 cnot; /* current OS notification */ + u32 nrdy; /* driver status */ + u8 rsvd2[60]; +} __attribute__((packed)); + +/* OpRegion mailbox #2: SWSCI */ +struct opregion_swsci { + u32 scic; /* SWSCI command|status|data */ + u32 parm; /* command parameters */ + u32 dslp; /* driver sleep time-out */ + u8 rsvd[244]; +} __attribute__((packed)); + +/* OpRegion mailbox #3: ASLE */ +struct opregion_asle { + u32 ardy; /* driver readiness */ + u32 aslc; /* ASLE interrupt command */ + u32 tche; /* technology enabled indicator */ + u32 alsi; /* current ALS illuminance reading */ + u32 bclp; /* backlight brightness to set */ + u32 pfit; /* panel fitting state */ + u32 cblv; /* current brightness level */ + u16 bclm[20]; /* backlight level duty cycle mapping table */ + u32 cpfm; /* current panel fitting mode */ + u32 epfm; /* enabled panel fitting modes */ + u8 plut[74]; /* panel LUT and identifier */ + u32 pfmb; /* PWM freq and min brightness */ + u8 rsvd[102]; +} __attribute__((packed)); + +/* ASLE irq request bits */ +#define ASLE_SET_ALS_ILLUM (1 << 0) +#define ASLE_SET_BACKLIGHT (1 << 1) +#define ASLE_SET_PFIT (1 << 2) +#define ASLE_SET_PWM_FREQ (1 << 3) +#define ASLE_REQ_MSK 0xf + +/* response bits of ASLE irq request */ +#define ASLE_ALS_ILLUM_FAILED (1<<10) +#define ASLE_BACKLIGHT_FAILED (1<<12) +#define ASLE_PFIT_FAILED (1<<14) +#define ASLE_PWM_FREQ_FAILED (1<<16) + +/* ASLE backlight brightness to set */ +#define ASLE_BCLP_VALID (1<<31) +#define ASLE_BCLP_MSK (~(1<<31)) + +/* ASLE panel fitting request */ +#define ASLE_PFIT_VALID (1<<31) +#define ASLE_PFIT_CENTER (1<<0) +#define ASLE_PFIT_STRETCH_TEXT (1<<1) +#define ASLE_PFIT_STRETCH_GFX (1<<2) + +/* PWM frequency and minimum brightness */ +#define ASLE_PFMB_BRIGHTNESS_MASK (0xff) +#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) +#define ASLE_PFMB_PWM_MASK (0x7ffffe00) +#define ASLE_PFMB_PWM_VALID (1<<31) + +#define ASLE_CBLV_VALID (1<<31) + +#define ACPI_OTHER_OUTPUT (0<<8) +#define ACPI_VGA_OUTPUT (1<<8) +#define ACPI_TV_OUTPUT (2<<8) +#define 
ACPI_DIGITAL_OUTPUT (3<<8) +#define ACPI_LVDS_OUTPUT (4<<8) + +#ifdef CONFIG_ACPI +static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle __iomem *asle = dev_priv->opregion.asle; + u32 max; + + DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); + + if (!(bclp & ASLE_BCLP_VALID)) + return ASLE_BACKLIGHT_FAILED; + + bclp &= ASLE_BCLP_MSK; + if (bclp > 255) + return ASLE_BACKLIGHT_FAILED; + + max = intel_panel_get_max_backlight(dev); + intel_panel_set_backlight(dev, bclp * max / 255); + iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); + + return 0; +} + +static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) +{ + /* alsi is the current ALS reading in lux. 0 indicates below sensor + range, 0xffff indicates above sensor range. 1-0xfffe are valid */ + return 0; +} + +static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (pfmb & ASLE_PFMB_PWM_VALID) { + u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); + u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; + blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; + pwm = pwm >> 9; + /* FIXME - what do we do with the PWM? */ + } + return 0; +} + +static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) +{ + /* Panel fitting is currently controlled by the X code, so this is a + noop until modesetting support works fully */ + if (!(pfit & ASLE_PFIT_VALID)) + return ASLE_PFIT_FAILED; + return 0; +} + +void intel_opregion_asle_intr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle __iomem *asle = dev_priv->opregion.asle; + u32 asle_stat = 0; + u32 asle_req; + + if (!asle) + return; + + asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; + + if (!asle_req) { + DRM_DEBUG_DRIVER("non asle set request??\n"); + return; + } + + if (asle_req & ASLE_SET_ALS_ILLUM) + asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); + + if (asle_req & ASLE_SET_BACKLIGHT) + asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); + + if (asle_req & ASLE_SET_PFIT) + asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); + + if (asle_req & ASLE_SET_PWM_FREQ) + asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); + + iowrite32(asle_stat, &asle->aslc); +} + +void intel_opregion_gse_intr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle __iomem *asle = dev_priv->opregion.asle; + u32 asle_stat = 0; + u32 asle_req; + + if (!asle) + return; + + asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; + + if (!asle_req) { + DRM_DEBUG_DRIVER("non asle set request??\n"); + return; + } + + if (asle_req & ASLE_SET_ALS_ILLUM) { + DRM_DEBUG_DRIVER("Illum is not supported\n"); + asle_stat |= ASLE_ALS_ILLUM_FAILED; + } + + if (asle_req & ASLE_SET_BACKLIGHT) + asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); + + if (asle_req & ASLE_SET_PFIT) { + DRM_DEBUG_DRIVER("Pfit is not supported\n"); + asle_stat |= ASLE_PFIT_FAILED; + } + + if (asle_req & ASLE_SET_PWM_FREQ) { + DRM_DEBUG_DRIVER("PWM freq is not supported\n"); + asle_stat |= ASLE_PWM_FREQ_FAILED; + } + + iowrite32(asle_stat, &asle->aslc); +} +#define ASLE_ALS_EN (1<<0) +#define ASLE_BLC_EN (1<<1) +#define ASLE_PFIT_EN (1<<2) +#define ASLE_PFMB_EN (1<<3) + +void intel_opregion_enable_asle(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle __iomem *asle = dev_priv->opregion.asle; + + if (asle) { + if (IS_MOBILE(dev)) 
+ intel_enable_asle(dev); + + iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | + ASLE_PFMB_EN, + &asle->tche); + iowrite32(1, &asle->ardy); + } +} + +#define ACPI_EV_DISPLAY_SWITCH (1<<0) +#define ACPI_EV_LID (1<<1) +#define ACPI_EV_DOCK (1<<2) + +static struct intel_opregion *system_opregion; + +static int intel_opregion_video_event(struct notifier_block *nb, + unsigned long val, void *data) +{ + /* The only video events relevant to opregion are 0x80. These indicate + either a docking event, lid switch or display switch request. In + Linux, these are handled by the dock, button and video drivers. + */ + + struct opregion_acpi __iomem *acpi; + struct acpi_bus_event *event = data; + int ret = NOTIFY_OK; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) +#define ACPI_VIDEO_CLASS "video" +#endif + if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) + return NOTIFY_DONE; + + if (!system_opregion) + return NOTIFY_DONE; + + acpi = system_opregion->acpi; + + if (event->type == 0x80 && + (ioread32(&acpi->cevt) & 1) == 0) + ret = NOTIFY_BAD; + + iowrite32(0, &acpi->csts); + + return ret; +} + +static struct notifier_block intel_opregion_notifier = { + .notifier_call = intel_opregion_video_event, +}; + +/* + * Initialise the DIDL field in opregion. This passes a list of devices to + * the firmware. Values are defined by section B.4.2 of the ACPI specification + * (version 3) + */ + +static void intel_didl_outputs(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + struct drm_connector *connector; + acpi_handle handle; + struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; + unsigned long long device_id; + acpi_status status; + u32 temp; + int i = 0; + + handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + if (!handle || acpi_bus_get_device(handle, &acpi_dev)) + return; + + if (acpi_is_video_device(handle)) + acpi_video_bus = acpi_dev; + else { + list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { + if (acpi_is_video_device(acpi_cdev->handle)) { + acpi_video_bus = acpi_cdev; + break; + } + } + } + + if (!acpi_video_bus) { + pr_warn("No ACPI video bus found\n"); + return; + } + + list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { + if (i >= 8) { + dev_printk(KERN_ERR, &dev->pdev->dev, + "More than 8 outputs detected\n"); + return; + } + status = + acpi_evaluate_integer(acpi_cdev->handle, "_ADR", + NULL, &device_id); + if (ACPI_SUCCESS(status)) { + if (!device_id) + goto blind_set; + iowrite32((u32)(device_id & 0x0f0f), + &opregion->acpi->didl[i]); + i++; + } + } + +end: + /* If fewer than 8 outputs, the list must be null terminated */ + if (i < 8) + iowrite32(0, &opregion->acpi->didl[i]); + return; + +blind_set: + i = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + int output_type = ACPI_OTHER_OUTPUT; + if (i >= 8) { + dev_printk(KERN_ERR, &dev->pdev->dev, + "More than 8 outputs detected\n"); + return; + } + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_DVIA: + output_type = ACPI_VGA_OUTPUT; + break; + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_9PinDIN: + output_type = ACPI_TV_OUTPUT; + break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_HDMIB: + output_type = ACPI_DIGITAL_OUTPUT; + break; + 
case DRM_MODE_CONNECTOR_LVDS: + output_type = ACPI_LVDS_OUTPUT; + break; + } + temp = ioread32(&opregion->acpi->didl[i]); + iowrite32(temp | (1<<31) | output_type | i, + &opregion->acpi->didl[i]); + i++; + } + goto end; +} + +static void intel_setup_cadls(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + int i = 0; + u32 disp_id; + + /* Initialize the CADL field by duplicating the DIDL values. + * Technically, this is not always correct as display outputs may exist, + * but not active. This initialization is necessary for some Clevo + * laptops that check this field before processing the brightness and + * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if + * there are less than eight devices. */ + do { + disp_id = ioread32(&opregion->acpi->didl[i]); + iowrite32(disp_id, &opregion->acpi->cadl[i]); + } while (++i < 8 && disp_id != 0); +} + +void intel_opregion_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_didl_outputs(dev); + intel_setup_cadls(dev); + } + + /* Notify BIOS we are ready to handle ACPI video ext notifs. + * Right now, all the events are handled by the ACPI video module. + * We don't actually need to do anything with them. */ + iowrite32(0, &opregion->acpi->csts); + iowrite32(1, &opregion->acpi->drdy); + + system_opregion = opregion; + register_acpi_notifier(&intel_opregion_notifier); + } + + if (opregion->asle) + intel_opregion_enable_asle(dev); +} + +void intel_opregion_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + iowrite32(0, &opregion->acpi->drdy); + + system_opregion = NULL; + unregister_acpi_notifier(&intel_opregion_notifier); + } + + /* just clear all opregion memory pointers now */ + iounmap(opregion->header); + opregion->header = NULL; + opregion->acpi = NULL; + opregion->swsci = NULL; + opregion->asle = NULL; + opregion->vbt = NULL; +} +#endif + +int intel_opregion_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + void __iomem *base; + u32 asls, mboxes; + char buf[sizeof(OPREGION_SIGNATURE)]; + int err = 0; + + pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); + DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); + if (asls == 0) { + DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); + return -ENOTSUPP; + } + + base = acpi_os_ioremap(asls, OPREGION_SIZE); + if (!base) + return -ENOMEM; + + memcpy_fromio(buf, base, sizeof(buf)); + + if (memcmp(buf, OPREGION_SIGNATURE, 16)) { + DRM_DEBUG_DRIVER("opregion signature mismatch\n"); + err = -EINVAL; + goto err_out; + } + opregion->header = base; + opregion->vbt = base + OPREGION_VBT_OFFSET; + + opregion->lid_state = base + ACPI_CLID; + + mboxes = ioread32(&opregion->header->mboxes); + if (mboxes & MBOX_ACPI) { + DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); + opregion->acpi = base + OPREGION_ACPI_OFFSET; + } + + if (mboxes & MBOX_SWSCI) { + DRM_DEBUG_DRIVER("SWSCI supported\n"); + opregion->swsci = base + OPREGION_SWSCI_OFFSET; + } + if (mboxes & MBOX_ASLE) { + DRM_DEBUG_DRIVER("ASLE supported\n"); + opregion->asle 
= base + OPREGION_ASLE_OFFSET; + } + + return 0; + +err_out: + iounmap(base); + return err; +} diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c new file mode 100644 index 0000000..67a2501 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -0,0 +1,1539 @@ +/* + * Copyright © 2009 + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Daniel Vetter + * + * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c + */ +#include +#include +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_drv.h" + +/* Limits for overlay size. According to intel doc, the real limits are: + * Y width: 4095, UV width (planar): 2047, Y height: 2047, + * UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use + * the mininum of both. 
*/ +#define IMAGE_MAX_WIDTH 2048 +#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */ +/* on 830 and 845 these large limits result in the card hanging */ +#define IMAGE_MAX_WIDTH_LEGACY 1024 +#define IMAGE_MAX_HEIGHT_LEGACY 1088 + +/* overlay register definitions */ +/* OCMD register */ +#define OCMD_TILED_SURFACE (0x1<<19) +#define OCMD_MIRROR_MASK (0x3<<17) +#define OCMD_MIRROR_MODE (0x3<<17) +#define OCMD_MIRROR_HORIZONTAL (0x1<<17) +#define OCMD_MIRROR_VERTICAL (0x2<<17) +#define OCMD_MIRROR_BOTH (0x3<<17) +#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */ +#define OCMD_UV_SWAP (0x1<<14) /* YVYU */ +#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */ +#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */ +#define OCMD_SOURCE_FORMAT_MASK (0xf<<10) +#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */ +#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */ +#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */ +#define OCMD_YUV_422_PACKED (0x8<<10) +#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */ +#define OCMD_YUV_420_PLANAR (0xc<<10) +#define OCMD_YUV_422_PLANAR (0xd<<10) +#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ +#define OCMD_TVSYNCFLIP_PARITY (0x1<<9) +#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) +#define OCMD_BUF_TYPE_MASK (0x1<<5) +#define OCMD_BUF_TYPE_FRAME (0x0<<5) +#define OCMD_BUF_TYPE_FIELD (0x1<<5) +#define OCMD_TEST_MODE (0x1<<4) +#define OCMD_BUFFER_SELECT (0x3<<2) +#define OCMD_BUFFER0 (0x0<<2) +#define OCMD_BUFFER1 (0x1<<2) +#define OCMD_FIELD_SELECT (0x1<<2) +#define OCMD_FIELD0 (0x0<<1) +#define OCMD_FIELD1 (0x1<<1) +#define OCMD_ENABLE (0x1<<0) + +/* OCONFIG register */ +#define OCONF_PIPE_MASK (0x1<<18) +#define OCONF_PIPE_A (0x0<<18) +#define OCONF_PIPE_B (0x1<<18) +#define OCONF_GAMMA2_ENABLE (0x1<<16) +#define OCONF_CSC_MODE_BT601 (0x0<<5) +#define OCONF_CSC_MODE_BT709 (0x1<<5) +#define OCONF_CSC_BYPASS (0x1<<4) +#define OCONF_CC_OUT_8BIT (0x1<<3) +#define OCONF_TEST_MODE (0x1<<2) +#define OCONF_THREE_LINE_BUFFER (0x1<<0) +#define OCONF_TWO_LINE_BUFFER (0x0<<0) + +/* DCLRKM (dst-key) register */ +#define DST_KEY_ENABLE (0x1<<31) +#define CLK_RGB24_MASK 0x0 +#define CLK_RGB16_MASK 0x070307 +#define CLK_RGB15_MASK 0x070707 +#define CLK_RGB8I_MASK 0xffffff + +#define RGB16_TO_COLORKEY(c) \ + (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) +#define RGB15_TO_COLORKEY(c) \ + (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) + +/* overlay flip addr flag */ +#define OFC_UPDATE 0x1 + +/* polyphase filter coefficients */ +#define N_HORIZ_Y_TAPS 5 +#define N_VERT_Y_TAPS 3 +#define N_HORIZ_UV_TAPS 3 +#define N_VERT_UV_TAPS 3 +#define N_PHASES 17 +#define MAX_TAPS 5 + +/* memory bufferd overlay registers */ +struct overlay_registers { + u32 OBUF_0Y; + u32 OBUF_1Y; + u32 OBUF_0U; + u32 OBUF_0V; + u32 OBUF_1U; + u32 OBUF_1V; + u32 OSTRIDE; + u32 YRGB_VPH; + u32 UV_VPH; + u32 HORZ_PH; + u32 INIT_PHS; + u32 DWINPOS; + u32 DWINSZ; + u32 SWIDTH; + u32 SWIDTHSW; + u32 SHEIGHT; + u32 YRGBSCALE; + u32 UVSCALE; + u32 OCLRC0; + u32 OCLRC1; + u32 DCLRKV; + u32 DCLRKM; + u32 SCLRKVH; + u32 SCLRKVL; + u32 SCLRKEN; + u32 OCONFIG; + u32 OCMD; + u32 RESERVED1; /* 0x6C */ + u32 OSTART_0Y; + u32 OSTART_1Y; + u32 OSTART_0U; + u32 OSTART_0V; + u32 OSTART_1U; + u32 OSTART_1V; + u32 OTILEOFF_0Y; + u32 OTILEOFF_1Y; + u32 OTILEOFF_0U; + u32 OTILEOFF_0V; + u32 OTILEOFF_1U; + u32 OTILEOFF_1V; + u32 FASTHSCALE; /* 0xA0 */ + u32 UVSCALEV; /* 0xA4 */ + u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */ + u16 
Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */ + u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES]; + u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */ + u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES]; + u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */ + u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES]; + u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */ + u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; +}; + +struct intel_overlay { + struct drm_device *dev; + struct intel_crtc *crtc; + struct drm_i915_gem_object *vid_bo; + struct drm_i915_gem_object *old_vid_bo; + int active; + int pfit_active; + u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ + u32 color_key; + u32 brightness, contrast, saturation; + u32 old_xscale, old_yscale; + /* register access */ + u32 flip_addr; + struct drm_i915_gem_object *reg_bo; + /* flip handling */ + uint32_t last_flip_req; + void (*flip_tail)(struct intel_overlay *); +}; + +static struct overlay_registers __iomem * +intel_overlay_map_regs(struct intel_overlay *overlay) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers __iomem *regs; + + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; + else + regs = io_mapping_map_wc(dev_priv->gtt.mappable, + overlay->reg_bo->gtt_offset); + + return regs; +} + +static void intel_overlay_unmap_regs(struct intel_overlay *overlay, + struct overlay_registers __iomem *regs) +{ + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap(regs); +} + +static int intel_overlay_do_wait_request(struct intel_overlay *overlay, + void (*tail)(struct intel_overlay *)) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + BUG_ON(overlay->last_flip_req); + ret = i915_add_request(ring, NULL, &overlay->last_flip_req); + if (ret) + return ret; + + overlay->flip_tail = tail; + ret = i915_wait_seqno(ring, overlay->last_flip_req); + if (ret) + return ret; + i915_gem_retire_requests(dev); + + overlay->last_flip_req = 0; + return 0; +} + +/* overlay needs to be disable in OCMD reg */ +static int intel_overlay_on(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + BUG_ON(overlay->active); + overlay->active = 1; + + WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return intel_overlay_do_wait_request(overlay, NULL); +} + +/* overlay needs to be enabled in OCMD reg */ +static int intel_overlay_continue(struct intel_overlay *overlay, + bool load_polyphase_filter) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + u32 flip_addr = overlay->flip_addr; + u32 tmp; + int ret; + + BUG_ON(!overlay->active); + + if (load_polyphase_filter) + flip_addr |= OFC_UPDATE; + + /* check for underruns */ + tmp = I915_READ(DOVSTA); + if (tmp & (1 << 17)) + DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); + + ret 
= intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_advance(ring); + + return i915_add_request(ring, NULL, &overlay->last_flip_req); +} + +static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) +{ + struct drm_i915_gem_object *obj = overlay->old_vid_bo; + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + overlay->old_vid_bo = NULL; +} + +static void intel_overlay_off_tail(struct intel_overlay *overlay) +{ + struct drm_i915_gem_object *obj = overlay->vid_bo; + + /* never have the overlay hw on without showing a frame */ + BUG_ON(!overlay->vid_bo); + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + overlay->vid_bo = NULL; + + overlay->crtc->overlay = NULL; + overlay->crtc = NULL; + overlay->active = 0; +} + +/* overlay needs to be disabled in OCMD reg */ +static int intel_overlay_off(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + u32 flip_addr = overlay->flip_addr; + int ret; + + BUG_ON(!overlay->active); + + /* According to intel docs the overlay hw may hang (when switching + * off) without loading the filter coeffs. It is however unclear whether + * this applies to the disabling of the overlay or to the switching off + * of the hw. Do it in both cases */ + flip_addr |= OFC_UPDATE; + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + /* wait for overlay to go idle */ + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + /* turn overlay off */ + if (IS_I830(dev)) { + /* Workaround: Don't disable the overlay fully, since otherwise + * it dies on the next OVERLAY_ON cmd. */ + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + } else { + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + } + intel_ring_advance(ring); + + return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail); +} + +/* recover from an interruption due to a signal + * We have to be careful not to repeat work forever an make forward progess. */ +static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + if (overlay->last_flip_req == 0) + return 0; + + ret = i915_wait_seqno(ring, overlay->last_flip_req); + if (ret) + return ret; + i915_gem_retire_requests(dev); + + if (overlay->flip_tail) + overlay->flip_tail(overlay); + + overlay->last_flip_req = 0; + return 0; +} + +/* Wait for pending overlay flip and release old frame. + * Needs to be called before the overlay register are changed + * via intel_overlay_(un)map_regs + */ +static int intel_overlay_release_old_vid(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + /* Only wait if there is actually an old frame to release to + * guarantee forward progress. 
+ */ + if (!overlay->old_vid_bo) + return 0; + + if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { + /* synchronous slowpath */ + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + ret = intel_overlay_do_wait_request(overlay, + intel_overlay_release_old_vid_tail); + if (ret) + return ret; + } + + intel_overlay_release_old_vid_tail(overlay); + return 0; +} + +struct put_image_params { + int format; + short dst_x; + short dst_y; + short dst_w; + short dst_h; + short src_w; + short src_scan_h; + short src_scan_w; + short src_h; + short stride_Y; + short stride_UV; + int offset_Y; + int offset_U; + int offset_V; +}; + +static int packed_depth_bytes(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + return 4; + case I915_OVERLAY_YUV411: + /* return 6; not implemented */ + default: + return -EINVAL; + } +} + +static int packed_width_bytes(u32 format, short width) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + return width << 1; + default: + return -EINVAL; + } +} + +static int uv_hsubsampling(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV420: + return 2; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + return 4; + default: + return -EINVAL; + } +} + +static int uv_vsubsampling(u32 format) +{ + switch (format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV420: + case I915_OVERLAY_YUV410: + return 2; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV411: + return 1; + default: + return -EINVAL; + } +} + +static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +{ + u32 mask, shift, ret; + if (IS_GEN2(dev)) { + mask = 0x1f; + shift = 5; + } else { + mask = 0x3f; + shift = 6; + } + ret = ((offset + width + mask) >> shift) - (offset >> shift); + if (!IS_GEN2(dev)) + ret <<= 1; + ret -= 1; + return ret << 2; +} + +static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { + 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, + 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, + 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, + 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, + 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, + 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, + 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, + 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, + 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, + 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, + 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, + 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, + 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, + 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, + 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, + 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, + 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 +}; + +static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { + 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, + 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, + 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880, + 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00, + 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0, + 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, + 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, + 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, + 0x3000, 0x0800, 0x3000 +}; + +static void update_polyphase_filter(struct overlay_registers __iomem *regs) +{ + memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs)); + memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs, + 
sizeof(uv_static_hcoeffs)); +} + +static bool update_scaling_factors(struct intel_overlay *overlay, + struct overlay_registers __iomem *regs, + struct put_image_params *params) +{ + /* fixed point with a 12 bit shift */ + u32 xscale, yscale, xscale_UV, yscale_UV; +#define FP_SHIFT 12 +#define FRACT_MASK 0xfff + bool scale_changed = false; + int uv_hscale = uv_hsubsampling(params->format); + int uv_vscale = uv_vsubsampling(params->format); + + if (params->dst_w > 1) + xscale = ((params->src_scan_w - 1) << FP_SHIFT) + /(params->dst_w); + else + xscale = 1 << FP_SHIFT; + + if (params->dst_h > 1) + yscale = ((params->src_scan_h - 1) << FP_SHIFT) + /(params->dst_h); + else + yscale = 1 << FP_SHIFT; + + /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ + xscale_UV = xscale/uv_hscale; + yscale_UV = yscale/uv_vscale; + /* make the Y scale to UV scale ratio an exact multiply */ + xscale = xscale_UV * uv_hscale; + yscale = yscale_UV * uv_vscale; + /*} else { + xscale_UV = 0; + yscale_UV = 0; + }*/ + + if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) + scale_changed = true; + overlay->old_xscale = xscale; + overlay->old_yscale = yscale; + + iowrite32(((yscale & FRACT_MASK) << 20) | + ((xscale >> FP_SHIFT) << 16) | + ((xscale & FRACT_MASK) << 3), + ®s->YRGBSCALE); + + iowrite32(((yscale_UV & FRACT_MASK) << 20) | + ((xscale_UV >> FP_SHIFT) << 16) | + ((xscale_UV & FRACT_MASK) << 3), + ®s->UVSCALE); + + iowrite32((((yscale >> FP_SHIFT) << 16) | + ((yscale_UV >> FP_SHIFT) << 0)), + ®s->UVSCALEV); + + if (scale_changed) + update_polyphase_filter(regs); + + return scale_changed; +} + +static void update_colorkey(struct intel_overlay *overlay, + struct overlay_registers __iomem *regs) +{ + u32 key = overlay->color_key; + + switch (overlay->crtc->base.fb->bits_per_pixel) { + case 8: + iowrite32(0, ®s->DCLRKV); + iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, ®s->DCLRKM); + break; + + case 16: + if (overlay->crtc->base.fb->depth == 15) { + iowrite32(RGB15_TO_COLORKEY(key), ®s->DCLRKV); + iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE, + ®s->DCLRKM); + } else { + iowrite32(RGB16_TO_COLORKEY(key), ®s->DCLRKV); + iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE, + ®s->DCLRKM); + } + break; + + case 24: + case 32: + iowrite32(key, ®s->DCLRKV); + iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, ®s->DCLRKM); + break; + } +} + +static u32 overlay_cmd_reg(struct put_image_params *params) +{ + u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0; + + if (params->format & I915_OVERLAY_YUV_PLANAR) { + switch (params->format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PLANAR; + break; + case I915_OVERLAY_YUV420: + cmd |= OCMD_YUV_420_PLANAR; + break; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + cmd |= OCMD_YUV_410_PLANAR; + break; + } + } else { /* YUV packed */ + switch (params->format & I915_OVERLAY_DEPTH_MASK) { + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PACKED; + break; + case I915_OVERLAY_YUV411: + cmd |= OCMD_YUV_411_PACKED; + break; + } + + switch (params->format & I915_OVERLAY_SWAP_MASK) { + case I915_OVERLAY_NO_SWAP: + break; + case I915_OVERLAY_UV_SWAP: + cmd |= OCMD_UV_SWAP; + break; + case I915_OVERLAY_Y_SWAP: + cmd |= OCMD_Y_SWAP; + break; + case I915_OVERLAY_Y_AND_UV_SWAP: + cmd |= OCMD_Y_AND_UV_SWAP; + break; + } + } + + return cmd; +} + +static int intel_overlay_do_put_image(struct intel_overlay *overlay, + struct drm_i915_gem_object *new_bo, + struct put_image_params *params) +{ + int ret, tmp_width; + struct overlay_registers __iomem *regs; + 
bool scale_changed = false; + struct drm_device *dev = overlay->dev; + u32 swidth, swidthsw, sheight, ostride; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + BUG_ON(!overlay); + + ret = intel_overlay_release_old_vid(overlay); + if (ret != 0) + return ret; + + ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); + if (ret != 0) + return ret; + + ret = i915_gem_object_put_fence(new_bo); + if (ret) + goto out_unpin; + + if (!overlay->active) { + u32 oconfig; + regs = intel_overlay_map_regs(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unpin; + } + oconfig = OCONF_CC_OUT_8BIT; + if (IS_GEN4(overlay->dev)) + oconfig |= OCONF_CSC_MODE_BT709; + oconfig |= overlay->crtc->pipe == 0 ? + OCONF_PIPE_A : OCONF_PIPE_B; + iowrite32(oconfig, ®s->OCONFIG); + intel_overlay_unmap_regs(overlay, regs); + + ret = intel_overlay_on(overlay); + if (ret != 0) + goto out_unpin; + } + + regs = intel_overlay_map_regs(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unpin; + } + + iowrite32((params->dst_y << 16) | params->dst_x, ®s->DWINPOS); + iowrite32((params->dst_h << 16) | params->dst_w, ®s->DWINSZ); + + if (params->format & I915_OVERLAY_YUV_PACKED) + tmp_width = packed_width_bytes(params->format, params->src_w); + else + tmp_width = params->src_w; + + swidth = params->src_w; + swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + sheight = params->src_h; + iowrite32(new_bo->gtt_offset + params->offset_Y, ®s->OBUF_0Y); + ostride = params->stride_Y; + + if (params->format & I915_OVERLAY_YUV_PLANAR) { + int uv_hscale = uv_hsubsampling(params->format); + int uv_vscale = uv_vsubsampling(params->format); + u32 tmp_U, tmp_V; + swidth |= (params->src_w/uv_hscale) << 16; + tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + params->src_w/uv_hscale); + tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + params->src_w/uv_hscale); + swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; + sheight |= (params->src_h/uv_vscale) << 16; + iowrite32(new_bo->gtt_offset + params->offset_U, ®s->OBUF_0U); + iowrite32(new_bo->gtt_offset + params->offset_V, ®s->OBUF_0V); + ostride |= params->stride_UV << 16; + } + + iowrite32(swidth, ®s->SWIDTH); + iowrite32(swidthsw, ®s->SWIDTHSW); + iowrite32(sheight, ®s->SHEIGHT); + iowrite32(ostride, ®s->OSTRIDE); + + scale_changed = update_scaling_factors(overlay, regs, params); + + update_colorkey(overlay, regs); + + iowrite32(overlay_cmd_reg(params), ®s->OCMD); + + intel_overlay_unmap_regs(overlay, regs); + + ret = intel_overlay_continue(overlay, scale_changed); + if (ret) + goto out_unpin; + + overlay->old_vid_bo = overlay->vid_bo; + overlay->vid_bo = new_bo; + + return 0; + +out_unpin: + i915_gem_object_unpin(new_bo); + return ret; +} + +int intel_overlay_switch_off(struct intel_overlay *overlay) +{ + struct overlay_registers __iomem *regs; + struct drm_device *dev = overlay->dev; + int ret; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + ret = intel_overlay_recover_from_interrupt(overlay); + if (ret != 0) + return ret; + + if (!overlay->active) + return 0; + + ret = intel_overlay_release_old_vid(overlay); + if (ret != 0) + return ret; + + regs = intel_overlay_map_regs(overlay); + iowrite32(0, ®s->OCMD); + intel_overlay_unmap_regs(overlay, regs); + + ret = intel_overlay_off(overlay); + if (ret != 0) + return ret; + + intel_overlay_off_tail(overlay); + return 0; +} + +static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, + struct 
intel_crtc *crtc) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + + if (!crtc->active) + return -EINVAL; + + /* can't use the overlay with double wide pipe */ + if (INTEL_INFO(overlay->dev)->gen < 4 && + (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) + return -EINVAL; + + return 0; +} + +static void update_pfit_vscale_ratio(struct intel_overlay *overlay) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 pfit_control = I915_READ(PFIT_CONTROL); + u32 ratio; + + /* XXX: This is not the same logic as in the xorg driver, but more in + * line with the intel documentation for the i965 + */ + if (INTEL_INFO(dev)->gen >= 4) { + /* on i965 use the PGM reg to read out the autoscaler values */ + ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; + } else { + if (pfit_control & VERT_AUTO_SCALE) + ratio = I915_READ(PFIT_AUTO_RATIOS); + else + ratio = I915_READ(PFIT_PGM_RATIOS); + ratio >>= PFIT_VERT_SCALE_SHIFT; + } + + overlay->pfit_vscale_ratio = ratio; +} + +static int check_overlay_dst(struct intel_overlay *overlay, + struct drm_intel_overlay_put_image *rec) +{ + struct drm_display_mode *mode = &overlay->crtc->base.mode; + + if (rec->dst_x < mode->hdisplay && + rec->dst_x + rec->dst_width <= mode->hdisplay && + rec->dst_y < mode->vdisplay && + rec->dst_y + rec->dst_height <= mode->vdisplay) + return 0; + else + return -EINVAL; +} + +static int check_overlay_scaling(struct put_image_params *rec) +{ + u32 tmp; + + /* downscaling limit is 8.0 */ + tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16; + if (tmp > 7) + return -EINVAL; + tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16; + if (tmp > 7) + return -EINVAL; + + return 0; +} + +static int check_overlay_src(struct drm_device *dev, + struct drm_intel_overlay_put_image *rec, + struct drm_i915_gem_object *new_bo) +{ + int uv_hscale = uv_hsubsampling(rec->flags); + int uv_vscale = uv_vsubsampling(rec->flags); + u32 stride_mask; + int depth; + u32 tmp; + + /* check src dimensions */ + if (IS_845G(dev) || IS_I830(dev)) { + if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || + rec->src_width > IMAGE_MAX_WIDTH_LEGACY) + return -EINVAL; + } else { + if (rec->src_height > IMAGE_MAX_HEIGHT || + rec->src_width > IMAGE_MAX_WIDTH) + return -EINVAL; + } + + /* better safe than sorry, use 4 as the maximal subsampling ratio */ + if (rec->src_height < N_VERT_Y_TAPS*4 || + rec->src_width < N_HORIZ_Y_TAPS*4) + return -EINVAL; + + /* check alignment constraints */ + switch (rec->flags & I915_OVERLAY_TYPE_MASK) { + case I915_OVERLAY_RGB: + /* not implemented */ + return -EINVAL; + + case I915_OVERLAY_YUV_PACKED: + if (uv_vscale != 1) + return -EINVAL; + + depth = packed_depth_bytes(rec->flags); + if (depth < 0) + return depth; + + /* ignore UV planes */ + rec->stride_UV = 0; + rec->offset_U = 0; + rec->offset_V = 0; + /* check pixel alignment */ + if (rec->offset_Y % depth) + return -EINVAL; + break; + + case I915_OVERLAY_YUV_PLANAR: + if (uv_vscale < 0 || uv_hscale < 0) + return -EINVAL; + /* no offset restrictions for planar formats */ + break; + + default: + return -EINVAL; + } + + if (rec->src_width % uv_hscale) + return -EINVAL; + + /* stride checking */ + if (IS_I830(dev) || IS_845G(dev)) + stride_mask = 255; + else + stride_mask = 63; + + if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) + return -EINVAL; + if (IS_GEN4(dev) && rec->stride_Y < 512) + return -EINVAL; + + tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == 
I915_OVERLAY_YUV_PLANAR ? + 4096 : 8192; + if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) + return -EINVAL; + + /* check buffer dimensions */ + switch (rec->flags & I915_OVERLAY_TYPE_MASK) { + case I915_OVERLAY_RGB: + case I915_OVERLAY_YUV_PACKED: + /* always 4 Y values per depth pixels */ + if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) + return -EINVAL; + + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->base.size) + return -EINVAL; + break; + + case I915_OVERLAY_YUV_PLANAR: + if (rec->src_width > rec->stride_Y) + return -EINVAL; + if (rec->src_width/uv_hscale > rec->stride_UV) + return -EINVAL; + + tmp = rec->stride_Y * rec->src_height; + if (rec->offset_Y + tmp > new_bo->base.size) + return -EINVAL; + + tmp = rec->stride_UV * (rec->src_height / uv_vscale); + if (rec->offset_U + tmp > new_bo->base.size || + rec->offset_V + tmp > new_bo->base.size) + return -EINVAL; + break; + } + + return 0; +} + +/** + * Return the pipe currently connected to the panel fitter, + * or -1 if the panel fitter is not present or not in use + */ +static int intel_panel_fitter_pipe(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pfit_control; + + /* i830 doesn't have a panel fitter */ + if (IS_I830(dev)) + return -1; + + pfit_control = I915_READ(PFIT_CONTROL); + + /* See if the panel fitter is in use */ + if ((pfit_control & PFIT_ENABLE) == 0) + return -1; + + /* 965 can place panel fitter on either pipe */ + if (IS_GEN4(dev)) + return (pfit_control >> 29) & 0x3; + + /* older chips can only use pipe 1 */ + return 1; +} + +int intel_overlay_put_image(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_overlay_put_image *put_image_rec = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct drm_mode_object *drmmode_obj; + struct intel_crtc *crtc; + struct drm_i915_gem_object *new_bo; + struct put_image_params *params; + int ret; + + /* No need to check for DRIVER_MODESET - we don't set it up then. */ + overlay = dev_priv->overlay; + if (!overlay) { + DRM_DEBUG("userspace bug: no overlay\n"); + return -ENODEV; + } + + if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { + drm_modeset_lock_all(dev); + mutex_lock(&dev->struct_mutex); + + ret = intel_overlay_switch_off(overlay); + + mutex_unlock(&dev->struct_mutex); + drm_modeset_unlock_all(dev); + + return ret; + } + + params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, + DRM_MODE_OBJECT_CRTC); + if (!drmmode_obj) { + ret = -ENOENT; + goto out_free; + } + crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + + new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, + put_image_rec->bo_handle)); + if (&new_bo->base == NULL) { + ret = -ENOENT; + goto out_free; + } + + drm_modeset_lock_all(dev); + mutex_lock(&dev->struct_mutex); + + if (new_bo->tiling_mode) { + DRM_ERROR("buffer used for overlay image can not be tiled\n"); + ret = -EINVAL; + goto out_unlock; + } + + ret = intel_overlay_recover_from_interrupt(overlay); + if (ret != 0) + goto out_unlock; + + if (overlay->crtc != crtc) { + struct drm_display_mode *mode = &crtc->base.mode; + ret = intel_overlay_switch_off(overlay); + if (ret != 0) + goto out_unlock; + + ret = check_overlay_possible_on_crtc(overlay, crtc); + if (ret != 0) + goto out_unlock; + + overlay->crtc = crtc; + crtc->overlay = overlay; + + /* line too wide, i.e. 
one-line-mode */ + if (mode->hdisplay > 1024 && + intel_panel_fitter_pipe(dev) == crtc->pipe) { + overlay->pfit_active = 1; + update_pfit_vscale_ratio(overlay); + } else + overlay->pfit_active = 0; + } + + ret = check_overlay_dst(overlay, put_image_rec); + if (ret != 0) + goto out_unlock; + + if (overlay->pfit_active) { + params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / + overlay->pfit_vscale_ratio); + /* shifting right rounds downwards, so add 1 */ + params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / + overlay->pfit_vscale_ratio) + 1; + } else { + params->dst_y = put_image_rec->dst_y; + params->dst_h = put_image_rec->dst_height; + } + params->dst_x = put_image_rec->dst_x; + params->dst_w = put_image_rec->dst_width; + + params->src_w = put_image_rec->src_width; + params->src_h = put_image_rec->src_height; + params->src_scan_w = put_image_rec->src_scan_width; + params->src_scan_h = put_image_rec->src_scan_height; + if (params->src_scan_h > params->src_h || + params->src_scan_w > params->src_w) { + ret = -EINVAL; + goto out_unlock; + } + + ret = check_overlay_src(dev, put_image_rec, new_bo); + if (ret != 0) + goto out_unlock; + params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; + params->stride_Y = put_image_rec->stride_Y; + params->stride_UV = put_image_rec->stride_UV; + params->offset_Y = put_image_rec->offset_Y; + params->offset_U = put_image_rec->offset_U; + params->offset_V = put_image_rec->offset_V; + + /* Check scaling after src size to prevent a divide-by-zero. */ + ret = check_overlay_scaling(params); + if (ret != 0) + goto out_unlock; + + ret = intel_overlay_do_put_image(overlay, new_bo, params); + if (ret != 0) + goto out_unlock; + + mutex_unlock(&dev->struct_mutex); + drm_modeset_unlock_all(dev); + + kfree(params); + + return 0; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + drm_modeset_unlock_all(dev); + drm_gem_object_unreference_unlocked(&new_bo->base); +out_free: + kfree(params); + + return ret; +} + +static void update_reg_attrs(struct intel_overlay *overlay, + struct overlay_registers __iomem *regs) +{ + iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff), + &regs->OCLRC0); + iowrite32(overlay->saturation, &regs->OCLRC1); +} + +static bool check_gamma_bounds(u32 gamma1, u32 gamma2) +{ + int i; + + if (gamma1 & 0xff000000 || gamma2 & 0xff000000) + return false; + + for (i = 0; i < 3; i++) { + if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) + return false; + } + + return true; +} + +static bool check_gamma5_errata(u32 gamma5) +{ + int i; + + for (i = 0; i < 3; i++) { + if (((gamma5 >> i*8) & 0xff) == 0x80) + return false; + } + + return true; +} + +static int check_gamma(struct drm_intel_overlay_attrs *attrs) +{ + if (!check_gamma_bounds(0, attrs->gamma0) || + !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || + !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || + !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || + !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || + !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || + !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) + return -EINVAL; + + if (!check_gamma5_errata(attrs->gamma5)) + return -EINVAL; + + return 0; +} + +int intel_overlay_attrs(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_overlay_attrs *attrs = data; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct overlay_registers __iomem *regs; + int ret; + + /* No need to check for DRIVER_MODESET - we don't set it up then.
*/ + overlay = dev_priv->overlay; + if (!overlay) { + DRM_DEBUG("userspace bug: no overlay\n"); + return -ENODEV; + } + + drm_modeset_lock_all(dev); + mutex_lock(&dev->struct_mutex); + + ret = -EINVAL; + if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { + attrs->color_key = overlay->color_key; + attrs->brightness = overlay->brightness; + attrs->contrast = overlay->contrast; + attrs->saturation = overlay->saturation; + + if (!IS_GEN2(dev)) { + attrs->gamma0 = I915_READ(OGAMC0); + attrs->gamma1 = I915_READ(OGAMC1); + attrs->gamma2 = I915_READ(OGAMC2); + attrs->gamma3 = I915_READ(OGAMC3); + attrs->gamma4 = I915_READ(OGAMC4); + attrs->gamma5 = I915_READ(OGAMC5); + } + } else { + if (attrs->brightness < -128 || attrs->brightness > 127) + goto out_unlock; + if (attrs->contrast > 255) + goto out_unlock; + if (attrs->saturation > 1023) + goto out_unlock; + + overlay->color_key = attrs->color_key; + overlay->brightness = attrs->brightness; + overlay->contrast = attrs->contrast; + overlay->saturation = attrs->saturation; + + regs = intel_overlay_map_regs(overlay); + if (!regs) { + ret = -ENOMEM; + goto out_unlock; + } + + update_reg_attrs(overlay, regs); + + intel_overlay_unmap_regs(overlay, regs); + + if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { + if (IS_GEN2(dev)) + goto out_unlock; + + if (overlay->active) { + ret = -EBUSY; + goto out_unlock; + } + + ret = check_gamma(attrs); + if (ret) + goto out_unlock; + + I915_WRITE(OGAMC0, attrs->gamma0); + I915_WRITE(OGAMC1, attrs->gamma1); + I915_WRITE(OGAMC2, attrs->gamma2); + I915_WRITE(OGAMC3, attrs->gamma3); + I915_WRITE(OGAMC4, attrs->gamma4); + I915_WRITE(OGAMC5, attrs->gamma5); + } + } + + ret = 0; +out_unlock: + mutex_unlock(&dev->struct_mutex); + drm_modeset_unlock_all(dev); + + return ret; +} + +void intel_setup_overlay(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay; + struct drm_i915_gem_object *reg_bo; + struct overlay_registers __iomem *regs; + int ret; + + if (!HAS_OVERLAY(dev)) + return; + + overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); + if (!overlay) + return; + + mutex_lock(&dev->struct_mutex); + if (WARN_ON(dev_priv->overlay)) + goto out_free; + + overlay->dev = dev; + + reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); + if (reg_bo == NULL) + reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); + if (reg_bo == NULL) + goto out_free; + overlay->reg_bo = reg_bo; + + if (OVERLAY_NEEDS_PHYSICAL(dev)) { + ret = i915_gem_attach_phys_object(dev, reg_bo, + I915_GEM_PHYS_OVERLAY_REGS, + PAGE_SIZE); + if (ret) { + DRM_ERROR("failed to attach phys overlay regs\n"); + goto out_free_bo; + } + overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; + } else { + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); + if (ret) { + DRM_ERROR("failed to pin overlay register bo\n"); + goto out_free_bo; + } + overlay->flip_addr = reg_bo->gtt_offset; + + ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); + if (ret) { + DRM_ERROR("failed to move overlay register bo into the GTT\n"); + goto out_unpin_bo; + } + } + + /* init all values */ + overlay->color_key = 0x0101fe; + overlay->brightness = -19; + overlay->contrast = 75; + overlay->saturation = 146; + + regs = intel_overlay_map_regs(overlay); + if (!regs) + goto out_unpin_bo; + + memset_io(regs, 0, sizeof(struct overlay_registers)); + update_polyphase_filter(regs); + update_reg_attrs(overlay, regs); + + intel_overlay_unmap_regs(overlay, regs); + + dev_priv->overlay = overlay; + mutex_unlock(&dev->struct_mutex); + 
DRM_INFO("initialized overlay support\n"); + return; + +out_unpin_bo: + if (!OVERLAY_NEEDS_PHYSICAL(dev)) + i915_gem_object_unpin(reg_bo); +out_free_bo: + drm_gem_object_unreference(®_bo->base); +out_free: + mutex_unlock(&dev->struct_mutex); + kfree(overlay); + return; +} + +void intel_cleanup_overlay(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!dev_priv->overlay) + return; + + /* The bo's should be free'd by the generic code already. + * Furthermore modesetting teardown happens beforehand so the + * hardware should be off already */ + BUG_ON(dev_priv->overlay->active); + + drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); + kfree(dev_priv->overlay); +} + +#ifdef CONFIG_DEBUG_FS +#include + +struct intel_overlay_error_state { + struct overlay_registers regs; + unsigned long base; + u32 dovsta; + u32 isr; +}; + +static struct overlay_registers __iomem * +intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers __iomem *regs; + + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + /* Cast to make sparse happy, but it's wc memory anyway, so + * equivalent to the wc io mapping on X86. */ + regs = (struct overlay_registers __iomem *) + overlay->reg_bo->phys_obj->handle->vaddr; + else + regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + overlay->reg_bo->gtt_offset); + + return regs; +} + +static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, + struct overlay_registers __iomem *regs) +{ + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap_atomic(regs); +} + + +struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay_error_state *error; + struct overlay_registers __iomem *regs; + + if (!overlay || !overlay->active) + return NULL; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + error->dovsta = I915_READ(DOVSTA); + error->isr = I915_READ(ISR); + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; + else + error->base = overlay->reg_bo->gtt_offset; + + regs = intel_overlay_map_regs_atomic(overlay); + if (!regs) + goto err; + + memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); + intel_overlay_unmap_regs_atomic(overlay, regs); + + return error; + +err: + kfree(error); + return NULL; +} + +void +intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error) +{ + seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", + error->dovsta, error->isr); + seq_printf(m, " Register file at 0x%08lx:\n", + error->base); + +#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x) + P(OBUF_0Y); + P(OBUF_1Y); + P(OBUF_0U); + P(OBUF_0V); + P(OBUF_1U); + P(OBUF_1V); + P(OSTRIDE); + P(YRGB_VPH); + P(UV_VPH); + P(HORZ_PH); + P(INIT_PHS); + P(DWINPOS); + P(DWINSZ); + P(SWIDTH); + P(SWIDTHSW); + P(SHEIGHT); + P(YRGBSCALE); + P(UVSCALE); + P(OCLRC0); + P(OCLRC1); + P(DCLRKV); + P(DCLRKM); + P(SCLRKVH); + P(SCLRKVL); + P(SCLRKEN); + P(OCONFIG); + P(OCMD); + P(OSTART_0Y); + P(OSTART_1Y); + P(OSTART_0U); + P(OSTART_0V); + P(OSTART_1U); + P(OSTART_1V); + P(OTILEOFF_0Y); + P(OTILEOFF_1Y); + P(OTILEOFF_0U); + P(OTILEOFF_0V); + P(OTILEOFF_1U); + P(OTILEOFF_1V); + P(FASTHSCALE); + P(UVSCALEV); +#undef P +} +#endif diff 
--git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c new file mode 100644 index 0000000..33cb87f --- /dev/null +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -0,0 +1,493 @@ +/* + * Copyright © 2006-2010 Intel Corporation + * Copyright (c) 2006 Dave Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Dave Airlie + * Jesse Barnes + * Chris Wilson + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include "intel_drv.h" + +#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ + +void +intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, + struct drm_display_mode *adjusted_mode) +{ + adjusted_mode->hdisplay = fixed_mode->hdisplay; + adjusted_mode->hsync_start = fixed_mode->hsync_start; + adjusted_mode->hsync_end = fixed_mode->hsync_end; + adjusted_mode->htotal = fixed_mode->htotal; + + adjusted_mode->vdisplay = fixed_mode->vdisplay; + adjusted_mode->vsync_start = fixed_mode->vsync_start; + adjusted_mode->vsync_end = fixed_mode->vsync_end; + adjusted_mode->vtotal = fixed_mode->vtotal; + + adjusted_mode->clock = fixed_mode->clock; +} + +/* adjusted_mode has been preset to be the panel's fixed mode */ +void +intel_pch_panel_fitting(struct drm_device *dev, + int fitting_mode, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int x, y, width, height; + + x = y = width = height = 0; + + /* Native modes don't need fitting */ + if (adjusted_mode->hdisplay == mode->hdisplay && + adjusted_mode->vdisplay == mode->vdisplay) + goto done; + + switch (fitting_mode) { + case DRM_MODE_SCALE_CENTER: + width = mode->hdisplay; + height = mode->vdisplay; + x = (adjusted_mode->hdisplay - width + 1)/2; + y = (adjusted_mode->vdisplay - height + 1)/2; + break; + + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the aspect ratio */ + { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + if (scaled_width > scaled_height) { /* pillar */ + width = scaled_height / mode->vdisplay; + if (width & 1) + width++; + x = (adjusted_mode->hdisplay - width + 1) / 2; + y = 0; + height = adjusted_mode->vdisplay; + } else if (scaled_width < scaled_height) { /* letter */ + height = scaled_width / mode->hdisplay; + if (height & 1) + height++; + y = (adjusted_mode->vdisplay - height + 1) / 2; + x = 0; + width = 
adjusted_mode->hdisplay; + } else { + x = y = 0; + width = adjusted_mode->hdisplay; + height = adjusted_mode->vdisplay; + } + } + break; + + default: + case DRM_MODE_SCALE_FULLSCREEN: + x = y = 0; + width = adjusted_mode->hdisplay; + height = adjusted_mode->vdisplay; + break; + } + +done: + dev_priv->pch_pf_pos = (x << 16) | y; + dev_priv->pch_pf_size = (width << 16) | height; +} + +static int is_backlight_combination_mode(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 4) + return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; + + if (IS_GEN2(dev)) + return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; + + return 0; +} + +static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + /* Restore the CTL value if it lost, e.g. GPU reset */ + + if (HAS_PCH_SPLIT(dev_priv->dev)) { + val = I915_READ(BLC_PWM_PCH_CTL2); + if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { + dev_priv->regfile.saveBLC_PWM_CTL2 = val; + } else if (val == 0) { + val = dev_priv->regfile.saveBLC_PWM_CTL2; + I915_WRITE(BLC_PWM_PCH_CTL2, val); + } + } else { + val = I915_READ(BLC_PWM_CTL); + if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { + dev_priv->regfile.saveBLC_PWM_CTL = val; + if (INTEL_INFO(dev)->gen >= 4) + dev_priv->regfile.saveBLC_PWM_CTL2 = + I915_READ(BLC_PWM_CTL2); + } else if (val == 0) { + val = dev_priv->regfile.saveBLC_PWM_CTL; + I915_WRITE(BLC_PWM_CTL, val); + if (INTEL_INFO(dev)->gen >= 4) + I915_WRITE(BLC_PWM_CTL2, + dev_priv->regfile.saveBLC_PWM_CTL2); + } + } + + return val; +} + +static u32 _intel_panel_get_max_backlight(struct drm_device *dev) +{ + u32 max; + + max = i915_read_blc_pwm_ctl(dev); + + if (HAS_PCH_SPLIT(dev)) { + max >>= 16; + } else { + if (INTEL_INFO(dev)->gen < 4) + max >>= 17; + else + max >>= 16; + + if (is_backlight_combination_mode(dev)) + max *= 0xff; + } + + return max; +} + +u32 intel_panel_get_max_backlight(struct drm_device *dev) +{ + u32 max; + + max = _intel_panel_get_max_backlight(dev); + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and program max PWM appropriately. + */ + pr_warn_once("fixme: max PWM is zero\n"); + return 1; + } + + DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); + return max; +} + +static int i915_panel_invert_brightness; +MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " + "(-1 force normal, 0 machine defaults, 1 force inversion), please " + "report PCI device ID, subsystem vendor and subsystem device ID " + "to dri-devel@lists.freedesktop.org, if your machine needs it. 
" + "It will then be included in an upcoming module version."); +module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); +static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (i915_panel_invert_brightness < 0) + return val; + + if (i915_panel_invert_brightness > 0 || + dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) + return intel_panel_get_max_backlight(dev) - val; + + return val; +} + +static u32 intel_panel_get_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + if (HAS_PCH_SPLIT(dev)) { + val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + } else { + val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + if (INTEL_INFO(dev)->gen < 4) + val >>= 1; + + if (is_backlight_combination_mode(dev)) { + u8 lbpc; + + pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); + val *= lbpc; + } + } + + val = intel_panel_compute_brightness(dev, val); + DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); + return val; +} + +static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CPU_CTL, val | level); +} + +static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + level = intel_panel_compute_brightness(dev, level); + + if (HAS_PCH_SPLIT(dev)) + return intel_pch_panel_set_backlight(dev, level); + + if (is_backlight_combination_mode(dev)) { + u32 max = intel_panel_get_max_backlight(dev); + u8 lbpc; + + lbpc = level * 0xfe / max + 1; + level /= lbpc; + pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); + } + + tmp = I915_READ(BLC_PWM_CTL); + if (INTEL_INFO(dev)->gen < 4) + level <<= 1; + tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CTL, tmp | level); +} + +void intel_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->backlight.level = level; + if (dev_priv->backlight.device) + dev_priv->backlight.device->props.brightness = level; + + if (dev_priv->backlight.enabled) + intel_panel_actually_set_backlight(dev, level); +} + +void intel_panel_disable_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->backlight.enabled = false; + intel_panel_actually_set_backlight(dev, 0); + + if (INTEL_INFO(dev)->gen >= 4) { + uint32_t reg, tmp; + + reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; + + I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); + + if (HAS_PCH_SPLIT(dev)) { + tmp = I915_READ(BLC_PWM_PCH_CTL1); + tmp &= ~BLM_PCH_PWM_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, tmp); + } + } +} + +void intel_panel_enable_backlight(struct drm_device *dev, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->backlight.level == 0) { + dev_priv->backlight.level = intel_panel_get_max_backlight(dev); + if (dev_priv->backlight.device) + dev_priv->backlight.device->props.brightness = + dev_priv->backlight.level; + } + + if (INTEL_INFO(dev)->gen >= 4) { + uint32_t reg, tmp; + + reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; + + + tmp = I915_READ(reg); + + /* Note that this can also get called through dpms changes. 
And + * we don't track the backlight dpms state, hence check whether + * we have to do anything first. */ + if (tmp & BLM_PWM_ENABLE) + goto set_level; + + if (INTEL_INFO(dev)->num_pipes == 3) + tmp &= ~BLM_PIPE_SELECT_IVB; + else + tmp &= ~BLM_PIPE_SELECT; + + tmp |= BLM_PIPE(pipe); + tmp &= ~BLM_PWM_ENABLE; + + I915_WRITE(reg, tmp); + POSTING_READ(reg); + I915_WRITE(reg, tmp | BLM_PWM_ENABLE); + + if (HAS_PCH_SPLIT(dev) && + !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { + tmp = I915_READ(BLC_PWM_PCH_CTL1); + tmp |= BLM_PCH_PWM_ENABLE; + tmp &= ~BLM_PCH_OVERRIDE_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, tmp); + } + } + +set_level: + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. + */ + dev_priv->backlight.enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); +} + +static void intel_panel_init_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->backlight.level = intel_panel_get_backlight(dev); + dev_priv->backlight.enabled = dev_priv->backlight.level != 0; +} + +enum drm_connector_status +intel_panel_detect(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Assume that the BIOS does not lie through the OpRegion... */ + if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { + return ioread32(dev_priv->opregion.lid_state) & 0x1 ? + connector_status_connected : + connector_status_disconnected; + } + + switch (i915_panel_ignore_lid) { + case -2: + return connector_status_connected; + case -1: + return connector_status_disconnected; + default: + return connector_status_unknown; + } +} + +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +static int intel_panel_update_status(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + intel_panel_set_backlight(dev, bd->props.brightness); + return 0; +} + +static int intel_panel_get_brightness(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + return intel_panel_get_backlight(dev); +} + +static const struct backlight_ops intel_panel_bl_ops = { + .update_status = intel_panel_update_status, + .get_brightness = intel_panel_get_brightness, +}; + +int intel_panel_setup_backlight(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct backlight_properties props; + + intel_panel_init_backlight(dev); + + if (WARN_ON(dev_priv->backlight.device)) + return -ENODEV; + + memset(&props, 0, sizeof(props)); + props.type = BACKLIGHT_RAW; + props.brightness = dev_priv->backlight.level; + props.max_brightness = _intel_panel_get_max_backlight(dev); + if (props.max_brightness == 0) { + DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n"); + return -ENODEV; + } + dev_priv->backlight.device = + backlight_device_register("intel_backlight", + &connector->kdev, dev, + &intel_panel_bl_ops, &props); + + if (IS_ERR(dev_priv->backlight.device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(dev_priv->backlight.device)); + dev_priv->backlight.device = NULL; + return -ENODEV; + } + return 0; +} + +void intel_panel_destroy_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (dev_priv->backlight.device) { + backlight_device_unregister(dev_priv->backlight.device); + dev_priv->backlight.device = NULL; + } +} +#else +int intel_panel_setup_backlight(struct 
drm_connector *connector) +{ + intel_panel_init_backlight(connector->dev); + return 0; +} + +void intel_panel_destroy_backlight(struct drm_device *dev) +{ + return; +} +#endif + +int intel_panel_init(struct intel_panel *panel, + struct drm_display_mode *fixed_mode) +{ + panel->fixed_mode = fixed_mode; + + return 0; +} + +void intel_panel_fini(struct intel_panel *panel) +{ + struct intel_connector *intel_connector = + container_of(panel, struct intel_connector, panel); + + if (panel->fixed_mode) + drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); +} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c new file mode 100644 index 0000000..94ad6bc --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -0,0 +1,4657 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eugeni Dodonov + * + */ + +#include +#include "i915_drv.h" +#include "intel_drv.h" +#include "../../../platform/x86/intel_ips.h" +#include + +#define FORCEWAKE_ACK_TIMEOUT_MS 2 + +/* FBC, or Frame Buffer Compression, is a technique employed to compress the + * framebuffer contents in-memory, aiming at reducing the required bandwidth + * during in-memory transfers and, therefore, reduce the power packet. + * + * The benefits of FBC are mostly visible with solid backgrounds and + * variation-less patterns. + * + * FBC-related functionality can be enabled by the means of the + * i915.i915_enable_fbc parameter + */ + +static bool intel_crtc_active(struct drm_crtc *crtc) +{ + /* Be paranoid as we can arrive here with only partial + * state retrieved from the hardware during setup. 
+ */ + return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock; +} + +static void i8xx_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 fbc_ctl; + + /* Disable compression */ + fbc_ctl = I915_READ(FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + + fbc_ctl &= ~FBC_CTL_EN; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + /* Wait for compressing bit to clear */ + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { + DRM_DEBUG_KMS("FBC idle timed out\n"); + return; + } + + DRM_DEBUG_KMS("disabled FBC\n"); +} + +static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int cfb_pitch; + int plane, i; + u32 fbc_ctl, fbc_ctl2; + + cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; + if (fb->pitches[0] < cfb_pitch) + cfb_pitch = fb->pitches[0]; + + /* FBC_CTL wants 64B units */ + cfb_pitch = (cfb_pitch / 64) - 1; + plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; + + /* Clear old tags */ + for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) + I915_WRITE(FBC_TAG + (i * 4), 0); + + /* Set it up... */ + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; + fbc_ctl2 |= plane; + I915_WRITE(FBC_CONTROL2, fbc_ctl2); + I915_WRITE(FBC_FENCE_OFF, crtc->y); + + /* enable it... */ + fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; + if (IS_I945GM(dev)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl |= obj->fence_reg; + I915_WRITE(FBC_CONTROL, fbc_ctl); + + DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", + cfb_pitch, crtc->y, intel_crtc->plane); +} + +static bool i8xx_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(FBC_CONTROL) & FBC_CTL_EN; +} + +static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; + dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; + I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(DPFC_FENCE_YOFF, crtc->y); + + /* enable it... 
*/ + I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +static void g4x_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +static bool g4x_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; +} + +static void sandybridge_blit_fbc_update(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 blt_ecoskpd; + + /* Make sure blitter notifies FBC of writes */ + gen6_gt_force_wake_get(dev_priv); + blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << + GEN6_BLITTER_LOCK_SHIFT); + I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); + POSTING_READ(GEN6_BLITTER_ECOSKPD); + gen6_gt_force_wake_put(dev_priv); +} + +static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= DPFC_RESERVED; + dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); + /* Set persistent mode for front-buffer rendering, ala X. */ + dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; + dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); + I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); + + I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + /* enable it... 
*/ + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + + if (IS_GEN6(dev)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | obj->fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + sandybridge_blit_fbc_update(dev); + } + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +static void ironlake_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + + DRM_DEBUG_KMS("disabled FBC\n"); + } +} + +static bool ironlake_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + +bool intel_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.fbc_enabled) + return false; + + return dev_priv->display.fbc_enabled(dev); +} + +static void intel_fbc_work_fn(struct work_struct *__work) +{ + struct intel_fbc_work *work = + container_of(to_delayed_work(__work), + struct intel_fbc_work, work); + struct drm_device *dev = work->crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev->struct_mutex); + if (work == dev_priv->fbc_work) { + /* Double check that we haven't switched fb without cancelling + * the prior work. + */ + if (work->crtc->fb == work->fb) { + dev_priv->display.enable_fbc(work->crtc, + work->interval); + + dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; + dev_priv->cfb_fb = work->crtc->fb->base.id; + dev_priv->cfb_y = work->crtc->y; + } + + dev_priv->fbc_work = NULL; + } + mutex_unlock(&dev->struct_mutex); + + kfree(work); +} + +static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) +{ + if (dev_priv->fbc_work == NULL) + return; + + DRM_DEBUG_KMS("cancelling pending FBC enable\n"); + + /* Synchronisation is provided by struct_mutex and checking of + * dev_priv->fbc_work, so we can perform the cancellation + * entirely asynchronously. + */ + if (cancel_delayed_work(&dev_priv->fbc_work->work)) + /* tasklet was killed before being run, clean up */ + kfree(dev_priv->fbc_work); + + /* Mark the work as no longer wanted so that if it does + * wake-up (because the work was already running and waiting + * for our mutex), it will discover that is no longer + * necessary to run. + */ + dev_priv->fbc_work = NULL; +} + +void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct intel_fbc_work *work; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!dev_priv->display.enable_fbc) + return; + + intel_cancel_fbc_work(dev_priv); + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) { + dev_priv->display.enable_fbc(crtc, interval); + return; + } + + work->crtc = crtc; + work->fb = crtc->fb; + work->interval = interval; + INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); + + dev_priv->fbc_work = work; + + DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); + + /* Delay the actual enabling to let pageflipping cease and the + * display to settle before starting the compression. Note that + * this delay also serves a second purpose: it allows for a + * vblank to pass after disabling the FBC before we attempt + * to modify the control registers. 
+ * + * A more complicated solution would involve tracking vblanks + * following the termination of the page-flipping sequence + * and indeed performing the enable as a co-routine and not + * waiting synchronously upon the vblank. + */ + schedule_delayed_work(&work->work, msecs_to_jiffies(50)); +} + +void intel_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_cancel_fbc_work(dev_priv); + + if (!dev_priv->display.disable_fbc) + return; + + dev_priv->display.disable_fbc(dev); + dev_priv->cfb_plane = -1; +} + +/** + * intel_update_fbc - enable/disable FBC as needed + * @dev: the drm_device + * + * Set up the framebuffer compression hardware at mode set time. We + * enable it if possible: + * - plane A only (on pre-965) + * - no pixel mulitply/line duplication + * - no alpha buffer discard + * - no dual wide + * - framebuffer <= 2048 in width, 1536 in height + * + * We can't assume that any compression will take place (worst case), + * so the compressed buffer has to be the same size as the uncompressed + * one. It also must reside (along with the line length buffer) in + * stolen memory. + * + * We need to enable/disable FBC on a global basis. + */ +void intel_update_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = NULL, *tmp_crtc; + struct intel_crtc *intel_crtc; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj; + int enable_fbc; + + if (!i915_powersave) + return; + + if (!I915_HAS_FBC(dev)) + return; + + /* + * If FBC is already on, we just have to verify that we can + * keep it that way... + * Need to disable if: + * - more than one pipe is active + * - changing FBC params (stride, fence, mode) + * - new fb is too large to fit in compressed buffer + * - going to an unsupported config (interlace, pixel multiply, etc.) 
+ */ + list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { + if (intel_crtc_active(tmp_crtc) && + !to_intel_crtc(tmp_crtc)->primary_disabled) { + if (crtc) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } + crtc = tmp_crtc; + } + } + + if (!crtc || crtc->fb == NULL) { + DRM_DEBUG_KMS("no output, disabling\n"); + dev_priv->no_fbc_reason = FBC_NO_OUTPUT; + goto out_disable; + } + + intel_crtc = to_intel_crtc(crtc); + fb = crtc->fb; + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + enable_fbc = i915_enable_fbc; + if (enable_fbc < 0) { + DRM_DEBUG_KMS("fbc set to per-chip default\n"); + enable_fbc = 1; + if (INTEL_INFO(dev)->gen <= 6) + enable_fbc = 0; + } + if (!enable_fbc) { + DRM_DEBUG_KMS("fbc disabled per module param\n"); + dev_priv->no_fbc_reason = FBC_MODULE_PARAM; + goto out_disable; + } + if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || + (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); + dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + goto out_disable; + } + if ((crtc->mode.hdisplay > 2048) || + (crtc->mode.vdisplay > 1536)) { + DRM_DEBUG_KMS("mode too large for compression, disabling\n"); + dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + goto out_disable; + } + if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_BAD_PLANE; + goto out_disable; + } + + /* The use of a CPU fence is mandatory in order to detect writes + * by the CPU to the scanout and trigger updates to the FBC. + */ + if (obj->tiling_mode != I915_TILING_X || + obj->fence_reg == I915_FENCE_REG_NONE) { + DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_NOT_TILED; + goto out_disable; + } + + /* If the kernel debugger is active, always disable compression */ + if (in_dbg_master()) + goto out_disable; + + if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { + DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size); + DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); + DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + goto out_disable; + } + + /* If the scanout has not changed, don't modify the FBC settings. + * Note that we make the fundamental assumption that the fb->obj + * cannot be unpinned (and have its GTT offset and fence revoked) + * without first being decoupled from the scanout and FBC disabled. + */ + if (dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_fb == fb->base.id && + dev_priv->cfb_y == crtc->y) + return; + + if (intel_fbc_enabled(dev)) { + /* We update FBC along two paths, after changing fb/crtc + * configuration (modeswitching) and after page-flipping + * finishes. For the latter, we know that not only did + * we disable the FBC at the start of the page-flip + * sequence, but also more than one vblank has passed. + * + * For the former case of modeswitching, it is possible + * to switch between two FBC valid configurations + * instantaneously so we do need to disable the FBC + * before we can modify its control registers. We also + * have to wait for the next vblank for that to take + * effect. 
However, since we delay enabling FBC we can + * assume that a vblank has passed since disabling and + * that we can safely alter the registers in the deferred + * callback. + * + * In the scenario that we go from a valid to invalid + * and then back to valid FBC configuration we have + * no strict enforcement that a vblank occurred since + * disabling the FBC. However, along all current pipe + * disabling paths we do need to wait for a vblank at + * some point. And we wait before enabling FBC anyway. + */ + DRM_DEBUG_KMS("disabling active FBC for update\n"); + intel_disable_fbc(dev); + } + + intel_enable_fbc(crtc, 500); + return; + +out_disable: + /* Multiple disables should be harmless */ + if (intel_fbc_enabled(dev)) { + DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); + intel_disable_fbc(dev); + } + i915_gem_stolen_cleanup_compression(dev); +} + +static void i915_pineview_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 tmp; + + tmp = I915_READ(CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } + + /* detect pineview DDR3 setting */ + tmp = I915_READ(CSHRDDR3CTL); + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; +} + +static void i915_ironlake_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u16 ddrpll, csipll; + + ddrpll = I915_READ16(DDRMPLL1); + csipll = I915_READ16(CSIPLL0); + + switch (ddrpll & 0xff) { + case 0xc: + dev_priv->mem_freq = 800; + break; + case 0x10: + dev_priv->mem_freq = 1066; + break; + case 0x14: + dev_priv->mem_freq = 1333; + break; + case 0x18: + dev_priv->mem_freq = 1600; + break; + default: + DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", + ddrpll & 0xff); + dev_priv->mem_freq = 0; + break; + } + + dev_priv->ips.r_t = dev_priv->mem_freq; + + switch (csipll & 0x3ff) { + case 0x00c: + dev_priv->fsb_freq = 3200; + break; + case 0x00e: + dev_priv->fsb_freq = 3733; + break; + case 0x010: + dev_priv->fsb_freq = 4266; + break; + case 0x012: + dev_priv->fsb_freq = 4800; + break; + case 0x014: + dev_priv->fsb_freq = 5333; + break; + case 0x016: + dev_priv->fsb_freq = 5866; + break; + case 0x018: + dev_priv->fsb_freq = 6400; + break; + default: + DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); + dev_priv->fsb_freq = 0; + break; + } + + if (dev_priv->fsb_freq == 3200) { + dev_priv->ips.c_m = 0; + } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { + dev_priv->ips.c_m = 1; + } else { + dev_priv->ips.c_m = 2; + } +} + +static const struct cxsr_latency cxsr_latency_table[] = { + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 
667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ +}; + +static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, + int is_ddr3, + int fsb, + int mem) +{ + const struct cxsr_latency *latency; + int i; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } + + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; +} + +static void pineview_disable_cxsr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* deactivate cxsr */ + I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. + */ +static const int latency_ns = 5000; + +static int i9xx_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + if (plane) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? 
"B" : "A", size); + + return size; +} + +static int i85x_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x1ff; + if (plane) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +static int i845_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", + size); + + return size; +} + +static int i830_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + + DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, + plane ? "B" : "A", size); + + return size; +} + +/* Pineview has different values for various configs */ +static const struct intel_watermark_params pineview_display_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_display_hplloff_wm = { + PINEVIEW_DISPLAY_FIFO, + PINEVIEW_MAX_WM, + PINEVIEW_DFT_HPLLOFF_WM, + PINEVIEW_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params pineview_cursor_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params pineview_cursor_hplloff_wm = { + PINEVIEW_CURSOR_FIFO, + PINEVIEW_CURSOR_MAX_WM, + PINEVIEW_CURSOR_DFT_WM, + PINEVIEW_CURSOR_GUARD_WM, + PINEVIEW_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params g4x_wm_info = { + G4X_FIFO_SIZE, + G4X_MAX_WM, + G4X_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params g4x_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_wm_info = { + VALLEYVIEW_FIFO_SIZE, + VALLEYVIEW_MAX_WM, + VALLEYVIEW_MAX_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params valleyview_cursor_wm_info = { + I965_CURSOR_FIFO, + VALLEYVIEW_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i965_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + I915_FIFO_LINE_SIZE, +}; +static const struct intel_watermark_params i945_wm_info = { + I945_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i915_wm_info = { + I915_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i855_wm_info = { + I855GM_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params i830_wm_info = { + I830_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params ironlake_display_wm_info = { + ILK_DISPLAY_FIFO, + ILK_DISPLAY_MAXWM, + ILK_DISPLAY_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params 
ironlake_cursor_wm_info = { + ILK_CURSOR_FIFO, + ILK_CURSOR_MAXWM, + ILK_CURSOR_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_display_srwm_info = { + ILK_DISPLAY_SR_FIFO, + ILK_DISPLAY_MAX_SRWM, + ILK_DISPLAY_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params ironlake_cursor_srwm_info = { + ILK_CURSOR_SR_FIFO, + ILK_CURSOR_MAX_SRWM, + ILK_CURSOR_DFT_SRWM, + 2, + ILK_FIFO_LINE_SIZE +}; + +static const struct intel_watermark_params sandybridge_display_wm_info = { + SNB_DISPLAY_FIFO, + SNB_DISPLAY_MAXWM, + SNB_DISPLAY_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_wm_info = { + SNB_CURSOR_FIFO, + SNB_CURSOR_MAXWM, + SNB_CURSOR_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_display_srwm_info = { + SNB_DISPLAY_SR_FIFO, + SNB_DISPLAY_MAX_SRWM, + SNB_DISPLAY_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; +static const struct intel_watermark_params sandybridge_cursor_srwm_info = { + SNB_CURSOR_SR_FIFO, + SNB_CURSOR_MAX_SRWM, + SNB_CURSOR_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + + +/** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock + * @wm: chip FIFO params + * @pixel_size: display pixel size + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + const struct intel_watermark_params *wm, + int fifo_size, + int pixel_size, + unsigned long latency_ns) +{ + long entries_required, wm_size; + + /* + * Note: we need to make sure we don't overflow for various clock & + * latency values. + * clocks go from a few thousand to several hundred thousand. + * latency is usually a few thousand + */ + entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + 1000; + entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); + + DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); + + wm_size = fifo_size - (entries_required + wm->guard_size); + + DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); + + /* Don't promote wm_size to unsigned... 
*/ + if (wm_size > (long)wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + return wm_size; +} + +static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) +{ + struct drm_crtc *crtc, *enabled = NULL; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (intel_crtc_active(crtc)) { + if (enabled) + return NULL; + enabled = crtc; + } + } + + return enabled; +} + +static void pineview_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + const struct cxsr_latency *latency; + u32 reg; + unsigned long wm; + + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, + dev_priv->fsb_freq, dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + pineview_disable_cxsr(dev); + return; + } + + crtc = single_enabled_crtc(dev); + if (crtc) { + int clock = crtc->mode.clock; + int pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Display SR */ + wm = intel_calculate_wm(clock, &pineview_display_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= ~DSPFW_SR_MASK; + reg |= wm << DSPFW_SR_SHIFT; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_wm, + pineview_display_wm.fifo_size, + pixel_size, latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_CURSOR_SR_MASK; + reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->display_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_SR_MASK; + reg |= wm & DSPFW_HPLL_SR_MASK; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, + pixel_size, latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~DSPFW_HPLL_CURSOR_MASK; + reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + I915_WRITE(DSPFW3, + I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); + DRM_DEBUG_KMS("Self-refresh is enabled\n"); + } else { + pineview_disable_cxsr(dev); + DRM_DEBUG_KMS("Self-refresh is disabled\n"); + } +} + +static bool g4x_compute_wm0(struct drm_device *dev, + int plane, + const struct intel_watermark_params *display, + int display_latency_ns, + const struct intel_watermark_params *cursor, + int cursor_latency_ns, + int *plane_wm, + int *cursor_wm) +{ + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size; + int line_time_us, line_count; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (!intel_crtc_active(crtc)) { + *cursor_wm = cursor->guard_size; + *plane_wm = display->guard_size; + return false; + } + + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if (*plane_wm > 
(int)display->max_wm) + *plane_wm = display->max_wm; + + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; + + return true; +} + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool g4x_check_srwm(struct drm_device *dev, + int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", + display_wm, cursor_wm); + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", + display_wm, display->max_wm); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", + cursor_wm, cursor->max_wm); + return false; + } + + if (!(display_wm || cursor_wm)) { + DRM_DEBUG_KMS("SR latency is 0, disabling\n"); + return false; + } + + return true; +} + +static bool g4x_compute_srwm(struct drm_device *dev, + int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + int hdisplay, htotal, pixel_size, clock; + unsigned long line_time_us; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return g4x_check_srwm(dev, + *display_wm, *cursor_wm, + display, cursor); +} + +static bool vlv_compute_drain_latency(struct drm_device *dev, + int plane, + int *plane_prec_mult, + int *plane_dl, + int *cursor_prec_mult, + int *cursor_dl) +{ + struct drm_crtc *crtc; + int clock, pixel_size; + int entries; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (!intel_crtc_active(crtc)) + return false; + + clock = crtc->mode.clock; /* VESA DOT Clock */ + pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ + + entries = (clock / 1000) * pixel_size; + *plane_prec_mult = (entries > 256) ? 
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * + pixel_size); + + entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ + *cursor_prec_mult = (entries > 256) ? + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; + *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + + return true; +} + +/* + * Update drain latency registers of memory arbiter + * + * Valleyview SoC has a new memory arbiter and needs drain latency registers + * to be programmed. Each plane has a drain latency multiplier and a drain + * latency value. + */ + +static void vlv_update_drain_latency(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_prec, planea_dl, planeb_prec, planeb_dl; + int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; + int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is + either 16 or 32 */ + + /* For plane A, Cursor A */ + if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, + &cursor_prec_mult, &cursora_dl)) { + cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; + planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + + I915_WRITE(VLV_DDL1, cursora_prec | + (cursora_dl << DDL_CURSORA_SHIFT) | + planea_prec | planea_dl); + } + + /* For plane B, Cursor B */ + if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, + &cursor_prec_mult, &cursorb_dl)) { + cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; + planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? + DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + + I915_WRITE(VLV_DDL2, cursorb_prec | + (cursorb_dl << DDL_CURSORB_SHIFT) | + planeb_prec | planeb_dl); + } +} + +#define single_plane_enabled(mask) is_power_of_2(mask) + +static void valleyview_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + int ignore_plane_sr, ignore_cursor_sr; + unsigned int enabled = 0; + + vlv_update_drain_latency(dev); + + if (g4x_compute_wm0(dev, PIPE_A, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1 << PIPE_A; + + if (g4x_compute_wm0(dev, PIPE_B, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 1 << PIPE_B; + + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &plane_sr, &ignore_cursor_sr) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + 2*sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &ignore_plane_sr, &cursor_sr)) { + I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + } else { + I915_WRITE(FW_BLC_SELF_VLV, + I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + plane_sr = cursor_sr = 0; + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + 
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | + (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +static void g4x_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + if (g4x_compute_wm0(dev, PIPE_A, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1 << PIPE_A; + + if (g4x_compute_wm0(dev, PIPE_B, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 1 << PIPE_B; + + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &g4x_wm_info, + &g4x_cursor_wm_info, + &plane_sr, &cursor_sr)) { + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + I915_WRITE(FW_BLC_SELF, + I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); + plane_sr = cursor_sr = 0; + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + /* HPLL off in SR has some issues on G4x... disable it */ + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | + (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +static void i965_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + int srwm = 1; + int cursor_sr = 16; + + /* Calc sr entries for one plane configs */ + crtc = single_enabled_crtc(dev); + if (crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 12000; + int clock = crtc->mode.clock; + int htotal = crtc->mode.htotal; + int hdisplay = crtc->mode.hdisplay; + int pixel_size = crtc->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = ((htotal * 1000) / clock); + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; + if (srwm < 0) + srwm = 1; + srwm &= 0x1ff; + DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", + entries, srwm); + + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * 64; + entries = DIV_ROUND_UP(entries, + i965_cursor_wm_info.cacheline_size); + cursor_sr = i965_cursor_wm_info.fifo_size - + (entries + i965_cursor_wm_info.guard_size); + + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + } else { + /* Turn off self refresh if both pipes are enabled */ + if (IS_CRESTLINE(dev)) + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); + + /* 965 has limitations... 
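A small stand-alone sketch of the self-refresh entries computation used in i965_update_wm() above (and in the same style by the i9xx path further down). The mode timings and FIFO geometry are assumed example values:

#include <stdio.h>

/* Self-refresh watermark sketch: express the SR exit latency in whole
 * scanlines (trunc + 1), turn that into bytes of display data, then into
 * FIFO lines, and subtract from the FIFO size. */
int main(void)
{
    int htotal = 2200, hdisplay = 1920, pixel_size = 4; /* assumed mode */
    int clock_khz = 148500;                             /* assumed dot clock */
    int sr_latency_ns = 12000;                          /* as in the code above */
    int fifo_line_size = 64, fifo_size = 512;           /* assumed FIFO geometry */

    int line_time_us = (htotal * 1000) / clock_khz;             /* us per scanline */
    int lines = ((sr_latency_ns / line_time_us) + 1000) / 1000;  /* trunc(latency/line time) + 1 */
    int bytes = lines * pixel_size * hdisplay;                   /* bytes to buffer */
    int entries = (bytes + fifo_line_size - 1) / fifo_line_size; /* FIFO lines, rounded up */
    int srwm = fifo_size - entries;

    printf("line_time=%dus lines=%d srwm=%d\n", line_time_us, lines, srwm);
    return 0;
}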
*/ + I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | + (8 << 16) | (8 << 8) | (8 << 0)); + I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + /* update cursor SR watermark */ + I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); +} + +static void i9xx_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_watermark_params *wm_info; + uint32_t fwater_lo; + uint32_t fwater_hi; + int cwm, srwm = 1; + int fifo_size; + int planea_wm, planeb_wm; + struct drm_crtc *crtc, *enabled = NULL; + + if (IS_I945GM(dev)) + wm_info = &i945_wm_info; + else if (!IS_GEN2(dev)) + wm_info = &i915_wm_info; + else + wm_info = &i855_wm_info; + + fifo_size = dev_priv->display.get_fifo_size(dev, 0); + crtc = intel_get_crtc_for_plane(dev, 0); + if (intel_crtc_active(crtc)) { + int cpp = crtc->fb->bits_per_pixel / 8; + if (IS_GEN2(dev)) + cpp = 4; + + planea_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, cpp, + latency_ns); + enabled = crtc; + } else + planea_wm = fifo_size - wm_info->guard_size; + + fifo_size = dev_priv->display.get_fifo_size(dev, 1); + crtc = intel_get_crtc_for_plane(dev, 1); + if (intel_crtc_active(crtc)) { + int cpp = crtc->fb->bits_per_pixel / 8; + if (IS_GEN2(dev)) + cpp = 4; + + planeb_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, cpp, + latency_ns); + if (enabled == NULL) + enabled = crtc; + else + enabled = NULL; + } else + planeb_wm = fifo_size - wm_info->guard_size; + + DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Play safe and disable self-refresh before adjusting watermarks. */ + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); + + /* Calc sr entries for one plane configs */ + if (HAS_FW_BLC(dev) && enabled) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 6000; + int clock = enabled->mode.clock; + int htotal = enabled->mode.htotal; + int hdisplay = enabled->mode.hdisplay; + int pixel_size = enabled->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; + + line_time_us = (htotal * 1000) / clock; + + /* Use ns/us then divide to preserve precision */ + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; + if (srwm < 0) + srwm = 1; + + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else if (IS_I915GM(dev)) + I915_WRITE(FW_BLC_SELF, srwm & 0x3f); + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + I915_WRITE(FW_BLC, fwater_lo); + I915_WRITE(FW_BLC2, fwater_hi); + + if (HAS_FW_BLC(dev)) { + if (enabled) { + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); + DRM_DEBUG_KMS("memory self refresh enabled\n"); + } else + DRM_DEBUG_KMS("memory 
self refresh disabled\n"); + } +} + +static void i830_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + uint32_t fwater_lo; + int planea_wm; + + crtc = single_enabled_crtc(dev); + if (crtc == NULL) + return; + + planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, + dev_priv->display.get_fifo_size(dev, 0), + 4, latency_ns); + fwater_lo = I915_READ(FW_BLC) & ~0xfff; + fwater_lo |= (3<<8) | planea_wm; + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); + + I915_WRITE(FW_BLC, fwater_lo); +} + +#define ILK_LP0_PLANE_LATENCY 700 +#define ILK_LP0_CURSOR_LATENCY 1300 + +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool ironlake_check_srwm(struct drm_device *dev, int level, + int fbc_wm, int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); + + if (fbc_wm > SNB_FBC_MAX_SRWM) { + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", + fbc_wm, SNB_FBC_MAX_SRWM, level); + + /* fbc has it's own way to disable FBC WM */ + I915_WRITE(DISP_ARB_CTL, + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + return false; + } + + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", + display_wm, SNB_DISPLAY_MAX_SRWM, level); + return false; + } + + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", + cursor_wm, SNB_CURSOR_MAX_SRWM, level); + return false; + } + + if (!(fbc_wm || display_wm || cursor_wm)) { + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); + return false; + } + + return true; +} + +/* + * Compute watermark values of WM[1-3], + */ +static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *fbc_wm, int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int hdisplay, htotal, pixel_size, clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *fbc_wm = *display_wm = *cursor_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* + * Spec says: + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 + */ + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + 
*cursor_wm = entries + cursor->guard_size; + + return ironlake_check_srwm(dev, level, + *fbc_wm, *display_wm, *cursor_wm, + display, cursor); +} + +static void ironlake_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, PIPE_A, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_A; + } + + if (g4x_compute_wm0(dev, PIPE_B, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_B; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled)) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + ILK_READ_WM1_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + ILK_READ_WM2_LATENCY() * 500, + &ironlake_display_srwm_info, + &ironlake_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* + * WM3 is unsupported on ILK, probably because we don't have latency + * data for that power state + */ +} + +static void sandybridge_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, PIPE_A, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEA_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEA_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_A; + } + + if (g4x_compute_wm0(dev, PIPE_B, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEB_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEB_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_B; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. 
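As a side note on the FBC rule quoted in ironlake_compute_srwm() above ("FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2"), a stand-alone sketch of that arithmetic with example inputs only:

#include <stdio.h>

/* FBC watermark sketch: primary watermark scaled by 64, divided by the
 * byte width of one line (rounded up), plus a guard of 2. */
static int fbc_wm(int display_wm, int line_size_bytes)
{
    /* DIV_ROUND_UP(display_wm * 64, line_size) + 2 */
    return (display_wm * 64 + line_size_bytes - 1) / line_size_bytes + 2;
}

int main(void)
{
    /* e.g. a primary SR watermark of 120 entries on a 1920-pixel, 32bpp line */
    printf("fbc_wm = %d\n", fbc_wm(120, 1920 * 4)); /* -> 3 */
    return 0;
}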
+ * + * SNB support 3 levels of watermark. + * + * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, + * and disabled in the descending order + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled) || + dev_priv->sprite_scaling_enabled) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + SNB_READ_WM1_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + SNB_READ_WM2_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3 */ + if (!ironlake_compute_srwm(dev, 3, enabled, + SNB_READ_WM3_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + +static void ivybridge_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int fbc_wm, plane_wm, cursor_wm; + int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; + unsigned int enabled; + + enabled = 0; + if (g4x_compute_wm0(dev, PIPE_A, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEA_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEA_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_A; + } + + if (g4x_compute_wm0(dev, PIPE_B, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEB_ILK); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEB_ILK, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_B; + } + + if (g4x_compute_wm0(dev, PIPE_C, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + val = I915_READ(WM0_PIPEC_IVB); + val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); + I915_WRITE(WM0_PIPEC_IVB, val | + ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); + DRM_DEBUG_KMS("FIFO watermarks For pipe C -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled |= 1 << PIPE_C; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + * + * SNB support 3 levels of watermark. 
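A quick illustration of the pipe-mask logic the update_wm paths above rely on: 'enabled' collects one bit per active pipe, single_plane_enabled() (is_power_of_2) gates the SR/LP watermarks, and ffs() - 1 recovers the pipe index. Pipe numbering assumed here is the usual PIPE_A = 0, PIPE_B = 1, PIPE_C = 2:

#include <stdio.h>

static int single_plane_enabled(unsigned int mask)
{
    return mask && (mask & (mask - 1)) == 0; /* same test as is_power_of_2() */
}

int main(void)
{
    unsigned int enabled = 1u << 1;  /* say only pipe B is active */

    if (single_plane_enabled(enabled))
        printf("SR watermarks computed for pipe %d\n",
               __builtin_ffs(enabled) - 1);   /* -> pipe 1 */
    else
        printf("more than one pipe active, SR left disabled\n");
    return 0;
}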
+ * + * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, + * and disabled in the descending order + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (!single_plane_enabled(enabled) || + dev_priv->sprite_scaling_enabled) + return; + enabled = ffs(enabled) - 1; + + /* WM1 */ + if (!ironlake_compute_srwm(dev, 1, enabled, + SNB_READ_WM1_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!ironlake_compute_srwm(dev, 2, enabled, + SNB_READ_WM2_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3, note we have to correct the cursor latency */ + if (!ironlake_compute_srwm(dev, 3, enabled, + SNB_READ_WM3_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &fbc_wm, &plane_wm, &ignore_cursor_wm) || + !ironlake_compute_srwm(dev, 3, enabled, + 2 * SNB_READ_WM3_LATENCY() * 500, + &sandybridge_display_srwm_info, + &sandybridge_cursor_srwm_info, + &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + +static void +haswell_update_linetime_wm(struct drm_device *dev, int pipe, + struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 temp; + + temp = I915_READ(PIPE_WM_LINETIME(pipe)); + temp &= ~PIPE_WM_LINETIME_MASK; + + /* The WM are computed with base on how long it takes to fill a single + * row at the given clock rate, multiplied by 8. + * */ + temp |= PIPE_WM_LINETIME_TIME( + ((mode->crtc_hdisplay * 1000) / mode->clock) * 8); + + /* IPS watermarks are only used by pipe A, and are ignored by + * pipes B and C. They are calculated similarly to the common + * linetime values, except that we are using CD clock frequency + * in MHz instead of pixel rate for the division. + * + * This is a placeholder for the IPS watermark calculation code. 
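A worked-number sketch of the line-time value assembled in haswell_update_linetime_wm() above: the time one active row takes at the current dot clock, multiplied by 8 (so the field effectively holds eighths of a microsecond). The mode numbers are assumptions:

#include <stdio.h>

int main(void)
{
    int crtc_hdisplay = 1920;     /* assumed active width in pixels */
    int clock_khz = 148500;       /* assumed dot clock */

    /* ((hdisplay * 1000) / clock) is the row time in us; times 8 as above */
    int linetime = ((crtc_hdisplay * 1000) / clock_khz) * 8;

    printf("PIPE_WM_LINETIME time field = %d\n", linetime); /* -> 96 */
    return 0;
}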
+ */ + + I915_WRITE(PIPE_WM_LINETIME(pipe), temp); +} + +static bool +sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int display_latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + int clock; + int entries, tlb_miss; + + crtc = intel_get_crtc_for_plane(dev, plane); + if (!intel_crtc_active(crtc)) { + *sprite_wm = display->guard_size; + return false; + } + + clock = crtc->mode.clock; + + /* Use the small buffer method to calculate the sprite watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - + sprite_width * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *sprite_wm = entries + display->guard_size; + if (*sprite_wm > (int)display->max_wm) + *sprite_wm = display->max_wm; + + return true; +} + +static bool +sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, + uint32_t sprite_width, int pixel_size, + const struct intel_watermark_params *display, + int latency_ns, int *sprite_wm) +{ + struct drm_crtc *crtc; + unsigned long line_time_us; + int clock; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *sprite_wm = 0; + return false; + } + + crtc = intel_get_crtc_for_plane(dev, plane); + clock = crtc->mode.clock; + if (!clock) { + *sprite_wm = 0; + return false; + } + + line_time_us = (sprite_width * 1000) / clock; + if (!line_time_us) { + *sprite_wm = 0; + return false; + } + + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = sprite_width * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *sprite_wm = entries + display->guard_size; + + return *sprite_wm > 0x3ff ? 
false : true; +} + +static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + u32 val; + int sprite_wm, reg; + int ret; + + switch (pipe) { + case 0: + reg = WM0_PIPEA_ILK; + break; + case 1: + reg = WM0_PIPEB_ILK; + break; + case 2: + reg = WM0_PIPEC_IVB; + break; + default: + return; /* bad pipe */ + } + + ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, + &sandybridge_display_wm_info, + latency, &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", + pipe); + return; + } + + val = I915_READ(reg); + val &= ~WM0_PIPE_SPRITE_MASK; + I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); + DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); + + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM1_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM1S_LP_ILK, sprite_wm); + + /* Only IVB has two more LP watermarks for sprite */ + if (!IS_IVYBRIDGE(dev)) + return; + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM2_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM2S_LP_IVB, sprite_wm); + + ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, + pixel_size, + &sandybridge_display_srwm_info, + SNB_READ_WM3_LATENCY() * 500, + &sprite_wm); + if (!ret) { + DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", + pipe); + return; + } + I915_WRITE(WM3S_LP_IVB, sprite_wm); +} + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
+ */ +void intel_update_watermarks(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_wm) + dev_priv->display.update_wm(dev); +} + +void intel_update_linetime_watermarks(struct drm_device *dev, + int pipe, struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_linetime_wm) + dev_priv->display.update_linetime_wm(dev, pipe, mode); +} + +void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, + uint32_t sprite_width, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.update_sprite_wm) + dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, + pixel_size); +} + +static struct drm_i915_gem_object * +intel_alloc_context_page(struct drm_device *dev) +{ + struct drm_i915_gem_object *ctx; + int ret; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + ctx = i915_gem_alloc_object(dev, 4096); + if (!ctx) { + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); + return NULL; + } + + ret = i915_gem_object_pin(ctx, 4096, true, false); + if (ret) { + DRM_ERROR("failed to pin power context: %d\n", ret); + goto err_unref; + } + + ret = i915_gem_object_set_to_gtt_domain(ctx, 1); + if (ret) { + DRM_ERROR("failed to set-domain on power context: %d\n", ret); + goto err_unpin; + } + + return ctx; + +err_unpin: + i915_gem_object_unpin(ctx); +err_unref: + drm_gem_object_unreference(&ctx->base); + return NULL; +} + +/** + * Lock protecting IPS related data structures + */ +DEFINE_SPINLOCK(mchdev_lock); + +/* Global for IPS driver to get at the current i915 device. Protected by + * mchdev_lock. */ +static struct drm_i915_private *i915_mch_dev; + +bool ironlake_set_drps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl; + + assert_spin_locked(&mchdev_lock); + + rgvswctl = I915_READ16(MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE16(MEMSWCTL, rgvswctl); + POSTING_READ16(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE16(MEMSWCTL, rgvswctl); + + return true; +} + +static void ironlake_enable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL); + u8 fmax, fmin, fstart, vstart; + + spin_lock_irq(&mchdev_lock); + + /* Enable temp reporting */ + I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + I915_WRITE(RCUPEI, 100000); + I915_WRITE(RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + I915_WRITE(RCBMAXAVG, 90000); + I915_WRITE(RCBMINAVG, 80000); + + I915_WRITE(MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + + dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ + dev_priv->ips.fstart = fstart; + + dev_priv->ips.max_delay = fstart; + dev_priv->ips.min_delay = fmin; + dev_priv->ips.cur_delay = fstart; + + DRM_DEBUG_DRIVER("fmax: %d, 
fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + /* + * Interrupts will be enabled in ironlake_irq_postinstall + */ + + I915_WRITE(VIDSTART, vstart); + POSTING_READ(VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + I915_WRITE(MEMMODECTL, rgvmodectl); + + if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + mdelay(1); + + ironlake_set_drps(dev, fstart); + + dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + I915_READ(0x112e0); + dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); + dev_priv->ips.last_count2 = I915_READ(0x112f4); + getrawmonotonic(&dev_priv->ips.last_time2); + + spin_unlock_irq(&mchdev_lock); +} + +static void ironlake_disable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 rgvswctl; + + spin_lock_irq(&mchdev_lock); + + rgvswctl = I915_READ16(MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); + I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); + I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + ironlake_set_drps(dev, dev_priv->ips.fstart); + mdelay(1); + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); + mdelay(1); + + spin_unlock_irq(&mchdev_lock); +} + +/* There's a funny hw issue where the hw returns all 0 when reading from + * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value + * ourselves, instead of doing a rmw cycle (which might result in us clearing + * all limits and the gpu stuck at whatever frequency it is at atm). + */ +static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) +{ + u32 limits; + + limits = 0; + + if (*val >= dev_priv->rps.max_delay) + *val = dev_priv->rps.max_delay; + limits |= dev_priv->rps.max_delay << 24; + + /* Only set the down limit when we've reached the lowest level to avoid + * getting more interrupts, otherwise leave this clear. This prevents a + * race in the hw when coming out of rc6: There's a tiny window where + * the hw runs at the minimal clock before selecting the desired + * frequency, if the down threshold expires in that window we will not + * receive a down interrupt. */ + if (*val <= dev_priv->rps.min_delay) { + *val = dev_priv->rps.min_delay; + limits |= dev_priv->rps.min_delay << 16; + } + + return limits; +} + +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 limits = gen6_rps_limits(dev_priv, &val); + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(val > dev_priv->rps.max_delay); + WARN_ON(val < dev_priv->rps.min_delay); + + if (val == dev_priv->rps.cur_delay) + return; + + if (IS_HASWELL(dev)) + I915_WRITE(GEN6_RPNSWREQ, + HSW_FREQUENCY(val)); + else + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(val) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + + /* Make sure we continue to get interrupts + * until we hit the minimum or maximum frequencies. 
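A stand-alone sketch of how the GEN6_RP_INTERRUPT_LIMITS word is assembled in gen6_rps_limits() above: the upper limit always lands in bits 31:24, while the lower limit (bits 23:16) is only written once the requested value has reached the minimum, so no further "go down" interrupts arrive. Delay values are in the driver's 50 MHz units; the ones below are arbitrary examples:

#include <stdio.h>

static unsigned int rps_limits(unsigned char max_delay, unsigned char min_delay,
                               unsigned char *val)
{
    unsigned int limits = 0;

    if (*val >= max_delay)
        *val = max_delay;
    limits |= (unsigned int)max_delay << 24;

    if (*val <= min_delay) {
        *val = min_delay;
        limits |= (unsigned int)min_delay << 16;
    }
    return limits;
}

int main(void)
{
    unsigned char val = 6;                        /* requested delay (example) */
    unsigned int limits = rps_limits(22, 6, &val);

    printf("val=%u limits=0x%08x\n", val, limits); /* -> val=6 limits=0x16060000 */
    return 0;
}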
+ */ + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); + + POSTING_READ(GEN6_RPNSWREQ); + + dev_priv->rps.cur_delay = val; + + trace_intel_gpu_freq_change(val * 50); +} + +static void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RC_CONTROL, 0); + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + /* Complete PM interrupt masking here doesn't race with the rps work + * item again unmasking PM interrupts because that is using a different + * register (PMIMR) to mask PM interrupts. The only risk is in leaving + * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + + spin_lock_irq(&dev_priv->rps.lock); + dev_priv->rps.pm_iir = 0; + spin_unlock_irq(&dev_priv->rps.lock); + + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + +int intel_enable_rc6(const struct drm_device *dev) +{ + /* Respect the kernel parameter if it is set */ + if (i915_enable_rc6 >= 0) + return i915_enable_rc6; + + /* Disable RC6 on Ironlake */ + if (INTEL_INFO(dev)->gen == 5) + return 0; + + if (IS_HASWELL(dev)) { + DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); + return INTEL_RC6_ENABLE; + } + + /* snb/ivb have more than one rc6 state. */ + if (INTEL_INFO(dev)->gen == 6) { + DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + return INTEL_RC6_ENABLE; + } + + DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); +} + +static void gen6_enable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + u32 rp_state_cap; + u32 gt_perf_status; + u32 rc6vids, pcu_mbox, rc6_mask = 0; + u32 gtfifodbg; + int rc6_mode; + int i, ret; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + /* Here begins a magic sequence of register writes to enable + * auto-downclocking. + * + * Perhaps there might be some value in exposing these to + * userspace... 
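For orientation, a tiny sketch of how a caller can interpret the bitmask returned by intel_enable_rc6() above. The real bit values live in a header outside this hunk; the defines below are stand-ins used only to keep the example self-contained:

#include <stdio.h>

#define RC6_ENABLE   (1 << 0)   /* stand-in for INTEL_RC6_ENABLE */
#define RC6p_ENABLE  (1 << 1)   /* stand-in for INTEL_RC6p_ENABLE */
#define RC6pp_ENABLE (1 << 2)   /* stand-in for INTEL_RC6pp_ENABLE */

int main(void)
{
    int mode = RC6_ENABLE | RC6p_ENABLE;  /* the "RC6 and deep RC6" case above */

    printf("RC6:   %s\n", (mode & RC6_ENABLE)   ? "on" : "off");
    printf("RC6p:  %s\n", (mode & RC6p_ENABLE)  ? "on" : "off");
    printf("RC6pp: %s\n", (mode & RC6pp_ENABLE) ? "on" : "off");
    return 0;
}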
+ */ + I915_WRITE(GEN6_RC_STATE, 0); + + /* Clear the DBG now so we don't confuse earlier errors */ + if ((gtfifodbg = I915_READ(GTFIFODBG))) { + DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); + I915_WRITE(GTFIFODBG, gtfifodbg); + } + + gen6_gt_force_wake_get(dev_priv); + + rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + + /* In units of 50MHz */ + dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; + dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; + dev_priv->rps.cur_delay = 0; + + /* disable the counters and set deterministic thresholds */ + I915_WRITE(GEN6_RC_CONTROL, 0); + + I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_ring(ring, dev_priv, i) + I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + /* Check if we are enabling RC6 */ + rc6_mode = intel_enable_rc6(dev_priv->dev); + if (rc6_mode & INTEL_RC6_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; + + /* We don't use those on Haswell */ + if (!IS_HASWELL(dev)) { + if (rc6_mode & INTEL_RC6p_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + + if (rc6_mode & INTEL_RC6pp_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + } + + DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + + I915_WRITE(GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + if (IS_HASWELL(dev)) { + I915_WRITE(GEN6_RPNSWREQ, + HSW_FREQUENCY(10)); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + HSW_FREQUENCY(12)); + } else { + I915_WRITE(GEN6_RPNSWREQ, + GEN6_FREQUENCY(10) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN6_FREQUENCY(12)); + } + + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + dev_priv->rps.max_delay << 24 | + dev_priv->rps.min_delay << 16); + + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); + + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); + + ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); + if (!ret) { + pcu_mbox = 0; + ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); + if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported. 
Max: %dMHz, Overclock max: %dMHz\n", + (dev_priv->rps.max_delay & 0xff) * 50, + (pcu_mbox & 0xff) * 50); + dev_priv->rps.hw_max = pcu_mbox & 0xff; + } + } else { + DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); + } + + gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); + + /* requires MSI enabled */ + I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); + spin_lock_irq(&dev_priv->rps.lock); + WARN_ON(dev_priv->rps.pm_iir != 0); + I915_WRITE(GEN6_PMIMR, 0); + spin_unlock_irq(&dev_priv->rps.lock); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); + + rc6vids = 0; + ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); + if (IS_GEN6(dev) && ret) { + DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); + } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", + GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); + rc6vids &= 0xffff00; + rc6vids |= GEN6_ENCODE_RC6_VID(450); + ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + if (ret) + DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); + } + + gen6_gt_force_wake_put(dev_priv); +} + +static void gen6_update_ring_freq(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int min_freq = 15; + unsigned int gpu_freq; + unsigned int max_ia_freq, min_ring_freq; + int scaling_factor = 180; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + max_ia_freq = cpufreq_quick_get_max(0); + /* + * Default to measured freq if none found, PCU will ensure we don't go + * over + */ + if (!max_ia_freq) + max_ia_freq = tsc_khz; + + /* Convert from kHz to MHz */ + max_ia_freq /= 1000; + + min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); + /* convert DDR frequency from units of 133.3MHz to bandwidth */ + min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; + gpu_freq--) { + int diff = dev_priv->rps.max_delay - gpu_freq; + unsigned int ia_freq = 0, ring_freq = 0; + + if (IS_HASWELL(dev)) { + ring_freq = (gpu_freq * 5 + 3) / 4; + ring_freq = max(min_ring_freq, ring_freq); + /* leave ia_freq as the default, chosen by cpufreq */ + } else { + /* On older processors, there is no separate ring + * clock domain, so in order to boost the bandwidth + * of the ring, we need to upclock the CPU (ia_freq). + * + * For GPU frequencies less than 750MHz, + * just use the lowest ring freq. 
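A stand-alone sketch of the pre-Haswell branch of the ring/IA frequency table described in the comment above: for each GPU frequency step the driver picks an IA frequency for the PCU to assume, scaling it down as the GPU frequency drops and falling back to the lowest point below min_freq. The CPU max and RPS range below are example inputs:

#include <stdio.h>

int main(void)
{
    int min_freq = 15, scaling_factor = 180;     /* as in the code above */
    int max_ia_freq = 3400;                      /* assumed CPU max, in MHz */
    int max_delay = 22, min_delay = 6;           /* assumed RPS range (50 MHz units) */

    for (int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
        int diff = max_delay - gpu_freq;
        int ia_freq;

        if (gpu_freq < min_freq)
            ia_freq = 800;                            /* lowest IA point */
        else
            ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);

        ia_freq = (ia_freq + 50) / 100;               /* DIV_ROUND_CLOSEST(ia_freq, 100) */

        printf("gpu %4d MHz -> ia %d (x100 MHz)\n", gpu_freq * 50, ia_freq);
    }
    return 0;
}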
+ */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); + ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); + } + + sandybridge_pcode_write(dev_priv, + GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | + ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | + gpu_freq); + } +} + +void ironlake_teardown_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->ips.renderctx) { + i915_gem_object_unpin(dev_priv->ips.renderctx); + drm_gem_object_unreference(&dev_priv->ips.renderctx->base); + dev_priv->ips.renderctx = NULL; + } + + if (dev_priv->ips.pwrctx) { + i915_gem_object_unpin(dev_priv->ips.pwrctx); + drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); + dev_priv->ips.pwrctx = NULL; + } +} + +static void ironlake_disable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_READ(PWRCTXA)) { + /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); + wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), + 50); + + I915_WRITE(PWRCTXA, 0); + POSTING_READ(PWRCTXA); + + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + POSTING_READ(RSTDBYCTL); + } +} + +static int ironlake_setup_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->ips.renderctx == NULL) + dev_priv->ips.renderctx = intel_alloc_context_page(dev); + if (!dev_priv->ips.renderctx) + return -ENOMEM; + + if (dev_priv->ips.pwrctx == NULL) + dev_priv->ips.pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->ips.pwrctx) { + ironlake_teardown_rc6(dev); + return -ENOMEM; + } + + return 0; +} + +static void ironlake_enable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + bool was_interruptible; + int ret; + + /* rc6 disabled by default due to repeated reports of hanging during + * boot and resume. + */ + if (!intel_enable_rc6(dev)) + return; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + ret = ironlake_setup_rc6(dev); + if (ret) + return; + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + /* + * GPU can automatically power down the render unit if given a page + * to save state. + */ + ret = intel_ring_begin(ring, 6); + if (ret) { + ironlake_teardown_rc6(dev); + dev_priv->mm.interruptible = was_interruptible; + return; + } + + intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + intel_ring_emit(ring, MI_SUSPEND_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_advance(ring); + + /* + * Wait for the command parser to advance past MI_SET_CONTEXT. 
The HW + * does an implicit flush, combined with MI_FLUSH above, it should be + * safe to assume that renderctx is valid + */ + ret = intel_ring_idle(ring); + dev_priv->mm.interruptible = was_interruptible; + if (ret) { + DRM_ERROR("failed to enable ironlake power savings\n"); + ironlake_teardown_rc6(dev); + return; + } + + I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); +} + +static unsigned long intel_pxfreq(u32 vidfreq) +{ + unsigned long freq; + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + freq = ((div * 133333) / ((1<ips.last_time1; + + /* Prevent division-by-zero if we are asking too fast. + * Also, we don't get interesting results if we are polling + * faster than once in 10ms, so just return the saved value + * in such cases. + */ + if (diff1 <= 10) + return dev_priv->ips.chipset_power; + + count1 = I915_READ(DMIEC); + count2 = I915_READ(DDREC); + count3 = I915_READ(CSIEC); + + total_count = count1 + count2 + count3; + + /* FIXME: handle per-counter overflow */ + if (total_count < dev_priv->ips.last_count1) { + diff = ~0UL - dev_priv->ips.last_count1; + diff += total_count; + } else { + diff = total_count - dev_priv->ips.last_count1; + } + + for (i = 0; i < ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == dev_priv->ips.c_m && + cparams[i].t == dev_priv->ips.r_t) { + m = cparams[i].m; + c = cparams[i].c; + break; + } + } + + diff = div_u64(diff, diff1); + ret = ((m * diff) + c); + ret = div_u64(ret, 10); + + dev_priv->ips.last_count1 = total_count; + dev_priv->ips.last_time1 = now; + + dev_priv->ips.chipset_power = ret; + + return ret; +} + +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) +{ + unsigned long val; + + if (dev_priv->info->gen != 5) + return 0; + + spin_lock_irq(&mchdev_lock); + + val = __i915_chipset_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); + + return val; +} + +unsigned long i915_mch_val(struct drm_i915_private *dev_priv) +{ + unsigned long m, x, b; + u32 tsfs; + + tsfs = I915_READ(TSFS); + + m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); + x = I915_READ8(TR1); + + b = tsfs & TSFS_INTR_MASK; + + return ((m * x) / 127) - b; +} + +static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) +{ + static const struct v_table { + u16 vd; /* in .1 mil */ + u16 vm; /* in .1 mil */ + } v_table[] = { + { 0, 0, }, + { 375, 0, }, + { 500, 0, }, + { 625, 0, }, + { 750, 0, }, + { 875, 0, }, + { 1000, 0, }, + { 1125, 0, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4250, 3125, }, + { 4375, 3250, }, + { 4500, 3375, }, + { 4625, 3500, }, + { 4750, 3625, }, + { 4875, 3750, }, + { 5000, 3875, }, + { 5125, 4000, }, + { 5250, 4125, }, + { 5375, 4250, }, + { 5500, 4375, }, + { 5625, 4500, }, + { 5750, 4625, }, + { 5875, 4750, }, + { 6000, 4875, }, + { 6125, 5000, }, + { 6250, 5125, }, + { 6375, 5250, }, + { 6500, 5375, }, + { 6625, 5500, }, + { 6750, 5625, }, + { 6875, 5750, }, + { 7000, 5875, }, + { 7125, 6000, }, + { 7250, 6125, }, + { 7375, 6250, }, + { 7500, 6375, 
}, + { 7625, 6500, }, + { 7750, 6625, }, + { 7875, 6750, }, + { 8000, 6875, }, + { 8125, 7000, }, + { 8250, 7125, }, + { 8375, 7250, }, + { 8500, 7375, }, + { 8625, 7500, }, + { 8750, 7625, }, + { 8875, 7750, }, + { 9000, 7875, }, + { 9125, 8000, }, + { 9250, 8125, }, + { 9375, 8250, }, + { 9500, 8375, }, + { 9625, 8500, }, + { 9750, 8625, }, + { 9875, 8750, }, + { 10000, 8875, }, + { 10125, 9000, }, + { 10250, 9125, }, + { 10375, 9250, }, + { 10500, 9375, }, + { 10625, 9500, }, + { 10750, 9625, }, + { 10875, 9750, }, + { 11000, 9875, }, + { 11125, 10000, }, + { 11250, 10125, }, + { 11375, 10250, }, + { 11500, 10375, }, + { 11625, 10500, }, + { 11750, 10625, }, + { 11875, 10750, }, + { 12000, 10875, }, + { 12125, 11000, }, + { 12250, 11125, }, + { 12375, 11250, }, + { 12500, 11375, }, + { 12625, 11500, }, + { 12750, 11625, }, + { 12875, 11750, }, + { 13000, 11875, }, + { 13125, 12000, }, + { 13250, 12125, }, + { 13375, 12250, }, + { 13500, 12375, }, + { 13625, 12500, }, + { 13750, 12625, }, + { 13875, 12750, }, + { 14000, 12875, }, + { 14125, 13000, }, + { 14250, 13125, }, + { 14375, 13250, }, + { 14500, 13375, }, + { 14625, 13500, }, + { 14750, 13625, }, + { 14875, 13750, }, + { 15000, 13875, }, + { 15125, 14000, }, + { 15250, 14125, }, + { 15375, 14250, }, + { 15500, 14375, }, + { 15625, 14500, }, + { 15750, 14625, }, + { 15875, 14750, }, + { 16000, 14875, }, + { 16125, 15000, }, + }; + if (dev_priv->info->is_mobile) + return v_table[pxvid].vm; + else + return v_table[pxvid].vd; +} + +static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) +{ + struct timespec now, diff1; + u64 diff; + unsigned long diffms; + u32 count; + + assert_spin_locked(&mchdev_lock); + + getrawmonotonic(&now); + diff1 = timespec_sub(now, dev_priv->ips.last_time2); + + /* Don't divide by 0 */ + diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; + if (!diffms) + return; + + count = I915_READ(GFXEC); + + if (count < dev_priv->ips.last_count2) { + diff = ~0UL - dev_priv->ips.last_count2; + diff += count; + } else { + diff = count - dev_priv->ips.last_count2; + } + + dev_priv->ips.last_count2 = count; + dev_priv->ips.last_time2 = now; + + /* More magic constants... 
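A small sketch of the wrap-safe counter delta used above for the GFXEC energy counter (and earlier for the chipset counters): if the hardware counter has wrapped since the last sample, take the distance to the wrap point plus the new reading instead of a plain subtraction. Sample values are made up:

#include <stdio.h>

static unsigned long counter_delta(unsigned long prev, unsigned long now)
{
    if (now < prev)
        return (~0UL - prev) + now;   /* counter wrapped around */
    return now - prev;
}

int main(void)
{
    printf("%lu\n", counter_delta(1000, 6000));     /* normal case: 5000 */
    printf("%lu\n", counter_delta(~0UL - 10, 20));  /* wrapped: 30 */
    return 0;
}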
*/ + diff = diff * 1181; + diff = div_u64(diff, diffms * 10); + dev_priv->ips.gfx_power = diff; +} + +void i915_update_gfx_val(struct drm_i915_private *dev_priv) +{ + if (dev_priv->info->gen != 5) + return; + + spin_lock_irq(&mchdev_lock); + + __i915_update_gfx_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); +} + +static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) +{ + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + assert_spin_locked(&mchdev_lock); + + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(dev_priv, pxvid); + + state1 = ext_v; + + t = i915_mch_val(dev_priv); + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + if (t > 80) + corr = ((t * 2349) + 135940); + else if (t >= 50) + corr = ((t * 964) + 29317); + else /* < 50 */ + corr = ((t * 301) + 1004); + + corr = corr * ((150142 * state1) / 10000 - 78642); + corr /= 100000; + corr2 = (corr * dev_priv->ips.corr); + + state2 = (corr2 * state1) / 10000; + state2 /= 100; /* convert to mW */ + + __i915_update_gfx_val(dev_priv); + + return dev_priv->ips.gfx_power + state2; +} + +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) +{ + unsigned long val; + + if (dev_priv->info->gen != 5) + return 0; + + spin_lock_irq(&mchdev_lock); + + val = __i915_gfx_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); + + return val; +} + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. + */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *dev_priv; + unsigned long chipset_val, graphics_val, ret = 0; + + spin_lock_irq(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + chipset_val = __i915_chipset_val(dev_priv); + graphics_val = __i915_gfx_val(dev_priv); + + ret = chipset_val + graphics_val; + +out_unlock: + spin_unlock_irq(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_read_mch_val); + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. + */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock_irq(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->ips.max_delay > dev_priv->ips.fmax) + dev_priv->ips.max_delay--; + +out_unlock: + spin_unlock_irq(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_raise); + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. + */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock_irq(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) + dev_priv->ips.max_delay++; + +out_unlock: + spin_unlock_irq(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_lower); + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. 
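A rough, purely illustrative sketch of the call pattern an IPS-style consumer could use against the exported helpers above (i915_read_mch_val, i915_gpu_raise/lower, i915_gpu_busy). The stub bodies stand in for the real i915 exports so the example compiles on its own; only the call pattern is the point, not the real intel_ips policy:

#include <stdio.h>
#include <stdbool.h>

static unsigned long i915_read_mch_val(void) { return 4200; } /* stub */
static bool i915_gpu_busy(void)  { return true; }             /* stub */
static bool i915_gpu_raise(void) { return true; }             /* stub */
static bool i915_gpu_lower(void) { return true; }             /* stub */

int main(void)
{
    bool over_budget = false;                 /* pretend thermal reading */
    unsigned long val = i915_read_mch_val();  /* combined chipset + graphics value */

    if (over_budget)
        i915_gpu_lower();                     /* throttle back the GPU frequency maximum */
    else if (i915_gpu_busy())
        i915_gpu_raise();                     /* thermal headroom and pending GPU work */

    printf("mch val %lu\n", val);
    return 0;
}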
+ */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *dev_priv; + struct intel_ring_buffer *ring; + bool ret = false; + int i; + + spin_lock_irq(&mchdev_lock); + if (!i915_mch_dev) + goto out_unlock; + dev_priv = i915_mch_dev; + + for_each_ring(ring, dev_priv, i) + ret |= !list_empty(&ring->request_list); + +out_unlock: + spin_unlock_irq(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_busy); + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. + */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *dev_priv; + bool ret = true; + + spin_lock_irq(&mchdev_lock); + if (!i915_mch_dev) { + ret = false; + goto out_unlock; + } + dev_priv = i915_mch_dev; + + dev_priv->ips.max_delay = dev_priv->ips.fstart; + + if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) + ret = false; + +out_unlock: + spin_unlock_irq(&mchdev_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); + +/** + * Tells the intel_ips driver that the i915 driver is now loaded, if + * IPS got loaded first. + * + * This awkward dance is so that neither module has to depend on the + * other in order for IPS to do the appropriate communication of + * GPU turbo limits to i915. + */ +static void +ips_ping_for_i915_load(void) +{ + void (*link)(void); + + link = symbol_get(ips_link_to_i915_driver); + if (link) { + link(); + symbol_put(ips_link_to_i915_driver); + } +} + +void intel_gpu_ips_init(struct drm_i915_private *dev_priv) +{ + /* We only register the i915 ips part with intel-ips once everything is + * set up, to avoid intel-ips sneaking in and reading bogus values. */ + spin_lock_irq(&mchdev_lock); + i915_mch_dev = dev_priv; + spin_unlock_irq(&mchdev_lock); + + ips_ping_for_i915_load(); +} + +void intel_gpu_ips_teardown(void) +{ + spin_lock_irq(&mchdev_lock); + i915_mch_dev = NULL; + spin_unlock_irq(&mchdev_lock); +} +static void intel_init_emon(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 lcfuse; + u8 pxw[16]; + int i; + + /* Disable to program */ + I915_WRITE(ECR, 0); + POSTING_READ(ECR); + + /* Program energy weights for various events */ + I915_WRITE(SDEW, 0x15040d00); + I915_WRITE(CSIEW0, 0x007f0000); + I915_WRITE(CSIEW1, 0x1e220004); + I915_WRITE(CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + I915_WRITE(PEW + (i * 4), 0); + for (i = 0; i < 3; i++) + I915_WRITE(DEW + (i * 4), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 0; i < 16; i++) { + u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); + unsigned long freq = intel_pxfreq(pxvidfreq); + unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + unsigned long val; + + val = vid * vid; + val *= (freq / 1000); + val *= 255; + val /= (127*127*900); + if (val > 0xff) + DRM_ERROR("bad pxval: %ld\n", val); + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | + (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); + I915_WRITE(PXW + (i * 4), val); + } + + /* Adjust magic regs to magic values (more experimental results) */ + I915_WRITE(OGW0, 0); + I915_WRITE(OGW1, 0); + I915_WRITE(EG0, 0x00007f00); + I915_WRITE(EG1, 0x0000000e); + I915_WRITE(EG2, 0x000e0000); + I915_WRITE(EG3, 0x68000300); + I915_WRITE(EG4, 0x42000000); + I915_WRITE(EG5, 0x00140031); + I915_WRITE(EG6, 0); + I915_WRITE(EG7, 0); + 
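+ /* Zero the PXWL weight registers before re-enabling the power monitor below */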
+ for (i = 0; i < 8; i++) + I915_WRITE(PXWL + (i * 4), 0); + + /* Enable PMON + select events */ + I915_WRITE(ECR, 0x80000019); + + lcfuse = I915_READ(LCFUSE02); + + dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); +} + +void intel_disable_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_IRONLAKE_M(dev)) { + ironlake_disable_drps(dev); + ironlake_disable_rc6(dev); + } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { + cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); + gen6_disable_rps(dev); + mutex_unlock(&dev_priv->rps.hw_lock); + } +} + +static void intel_gen6_powersave_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, + rps.delayed_resume_work.work); + struct drm_device *dev = dev_priv->dev; + + mutex_lock(&dev_priv->rps.hw_lock); + gen6_enable_rps(dev); + gen6_update_ring_freq(dev); + mutex_unlock(&dev_priv->rps.hw_lock); +} + +void intel_enable_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_IRONLAKE_M(dev)) { + ironlake_enable_drps(dev); + ironlake_enable_rc6(dev); + intel_init_emon(dev); + } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + /* + * PCU communication is slow and this doesn't need to be + * done at any specific time, so do this out of our fast path + * to make resume and init faster. + */ + schedule_delayed_work(&dev_priv->rps.delayed_resume_work, + round_jiffies_up_relative(HZ)); + } +} + +static void ibx_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); +} + +static void ironlake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + + /* Required for FBC */ + dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | + ILK_DPFCUNIT_CLOCK_GATE_DISABLE | + ILK_DPFDUNIT_CLOCK_GATE_ENABLE; + + I915_WRITE(PCH_3DCGDIS0, + MARIUNIT_CLOCK_GATE_DISABLE | + SVSMUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(PCH_3DCGDIS1, + VFMUNIT_CLOCK_GATE_DISABLE); + + /* + * According to the spec the following bits should be set in + * order to enable memory self-refresh + * The bit 22/21 of 0x42004 + * The bit 5 of 0x42020 + * The bit 15 of 0x45000 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN2, + (I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); + dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; + I915_WRITE(DISP_ARB_CTL, + (I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* + * Based on the document from hardware guys the following bits + * should be set unconditionally in order to enable FBC. + * The bit 22 of 0x42000 + * The bit 22 of 0x42004 + * The bit 7,8,9 of 0x42020. 
+ */ + if (IS_IRONLAKE_M(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE); + } + + I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + I915_WRITE(_3D_CHICKEN2, + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | + _3D_CHICKEN2_WM_READ_PIPELINED); + + /* WaDisableRenderCachePipelinedFlush */ + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); + + ibx_init_clock_gating(dev); +} + +static void cpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t val; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + DPLS_EDP_PPS_FIX_DIS); + /* The below fixes the weird display corruption, a few pixels shifted + * downward, on (only) LVDS of some HP laptops with IVY. + */ + for_each_pipe(pipe) { + val = I915_READ(TRANS_CHICKEN2(pipe)); + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; + if (dev_priv->fdi_rx_polarity_inverted) + val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; + I915_WRITE(TRANS_CHICKEN2(pipe), val); + } + /* WADP0ClockGatingDisable */ + for_each_pipe(pipe) { + I915_WRITE(TRANS_CHICKEN1(pipe), + TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); + } +} + +static void gen6_check_mch_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t tmp; + + tmp = I915_READ(MCH_SSKPD); + if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) { + DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp); + DRM_INFO("This can cause pipe underruns and display issues.\n"); + DRM_INFO("Please upgrade your BIOS to fix this.\n"); + } +} + +static void gen6_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + + /* WaDisableHiZPlanesWhenMSAAEnabled */ + I915_WRITE(_3D_CHICKEN, + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); + + /* WaSetupGtModeTdRowDispatch */ + if (IS_SNB_GT1(dev)) + I915_WRITE(GEN6_GT_MODE, + _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); + + I915_WRITE(GEN6_UCGCTL1, + I915_READ(GEN6_UCGCTL1) | + GEN6_BLBUNIT_CLOCK_GATE_DISABLE | + GEN6_CSUNIT_CLOCK_GATE_DISABLE); + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. 
+ * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + * + * Also apply WaDisableVDSUnitClockGating and + * WaDisableRCPBUnitClockGating. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN7_VDSUNIT_CLOCK_GATE_DISABLE | + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + + /* Bspec says we need to always set all mask bits. */ + I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) | + _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL); + + /* + * According to the spec the following bits should be + * set in order to enable memory self-refresh and fbc: + * The bit21 and bit22 of 0x42000 + * The bit21 and bit22 of 0x42004 + * The bit5 and bit7 of 0x42020 + * The bit14 of 0x70180 + * The bit14 of 0x71180 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL); + I915_WRITE(ILK_DSPCLK_GATE_D, + I915_READ(ILK_DSPCLK_GATE_D) | + ILK_DPARBUNIT_CLOCK_GATE_ENABLE | + ILK_DPFDUNIT_CLOCK_GATE_ENABLE); + + /* WaMbcDriverBootEnable */ + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + /* The default value should be 0x200 according to docs, but the two + * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ + I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); + I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); + + cpt_init_clock_gating(dev); + + gen6_check_mch_setup(dev); +} + +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) +{ + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + + reg &= ~GEN7_FF_SCHED_MASK; + reg |= GEN7_FF_TS_SCHED_HW; + reg |= GEN7_FF_VS_SCHED_HW; + reg |= GEN7_FF_DS_SCHED_HW; + + /* WaVSRefCountFullforceMissDisable */ + if (IS_HASWELL(dev_priv->dev)) + reg &= ~GEN7_FF_VS_REF_CNT_FFME; + + I915_WRITE(GEN7_FF_THREAD_MODE, reg); +} + +static void lpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * TODO: this bit should only be enabled when really needed, then + * disabled when not needed anymore in order to save power. + */ + if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) + I915_WRITE(SOUTH_DSPCLK_GATE_D, + I915_READ(SOUTH_DSPCLK_GATE_D) | + PCH_LP_PARTITION_LEVEL_DISABLE); +} + +static void haswell_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + gen7_setup_fixed_func_scheduler(dev_priv); + + /* WaDisable4x2SubspanOptimization */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + /* WaMbcDriverBootEnable */ + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + + /* WaSwitchSolVfFArbitrationPriority */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); + + /* XXX: This is a workaround for early silicon revisions and should be + * removed later. + */ + I915_WRITE(WM_DBG, + I915_READ(WM_DBG) | + WM_DBG_DISALLOW_MULTIPLE_LP | + WM_DBG_DISALLOW_SPRITE | + WM_DBG_DISALLOW_MAXFIFO); + + lpt_init_clock_gating(dev); +} + +static void ivybridge_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t snpcr; + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableEarlyCull */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); + + /* WaDisableBackToBackFlipFix */ + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* WaDisablePSDDualDispatchEnable */ + if (IS_IVB_GT1(dev)) + I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, + _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); + else + I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, + _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + if (IS_IVB_GT1(dev)) + I915_WRITE(GEN7_ROW_CHICKEN2, + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); + else + I915_WRITE(GEN7_ROW_CHICKEN2_GT2, + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); + + + /* WaForceL3Serialization */ + I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & + ~L3SQ_URB_READ_CAM_MATCH_DISABLE); + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + * + * According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. 
+ */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCZUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + /* WaMbcDriverBootEnable */ + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + + gen7_setup_fixed_func_scheduler(dev_priv); + + /* WaDisable4x2SubspanOptimization */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= GEN6_MBC_SNPCR_MED; + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + + if (!HAS_PCH_NOP(dev)) + cpt_init_clock_gating(dev); + + gen6_check_mch_setup(dev); +} + +static void valleyview_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableEarlyCull */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); + + /* WaDisableBackToBackFlipFix */ + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* WaDisablePSDDualDispatchEnable */ + I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, + _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + + /* WaForceL3Serialization */ + I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & + ~L3SQ_URB_READ_CAM_MATCH_DISABLE); + + /* WaDisableDopClockGating */ + I915_WRITE(GEN7_ROW_CHICKEN2, + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); + + /* WaForceL3Serialization */ + I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & + ~L3SQ_URB_READ_CAM_MATCH_DISABLE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + /* WaMbcDriverBootEnable */ + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + * + * According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + * + * Also apply WaDisableVDSUnitClockGating and + * WaDisableRCPBUnitClockGating. 
+ */ + I915_WRITE(GEN6_UCGCTL2, + GEN7_VDSUNIT_CLOCK_GATE_DISABLE | + GEN7_TDLUNIT_CLOCK_GATE_DISABLE | + GEN6_RCZUNIT_CLOCK_GATE_DISABLE | + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + /* + * WaDisableVLVClockGating_VBIIssue + * Disable clock gating on th GCFG unit to prevent a delay + * in the reporting of vblank events. + */ + I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); + + /* Conservative clock gating settings for now */ + I915_WRITE(0x9400, 0xffffffff); + I915_WRITE(0x9404, 0xffffffff); + I915_WRITE(0x9408, 0xffffffff); + I915_WRITE(0x940c, 0xffffffff); + I915_WRITE(0x9410, 0xffffffff); + I915_WRITE(0x9414, 0xffffffff); + I915_WRITE(0x9418, 0xffffffff); +} + +static void g4x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dspclk_gate; + + I915_WRITE(RENCLK_GATE_D1, 0); + I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | + GS_UNIT_CLOCK_GATE_DISABLE | + CL_UNIT_CLOCK_GATE_DISABLE); + I915_WRITE(RAMCLK_GATE_D, 0); + dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | + OVRUNIT_CLOCK_GATE_DISABLE | + OVCUNIT_CLOCK_GATE_DISABLE; + if (IS_GM45(dev)) + dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, dspclk_gate); + + /* WaDisableRenderCachePipelinedFlush */ + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); +} + +static void crestline_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); + I915_WRITE(DSPCLK_GATE_D, 0); + I915_WRITE(RAMCLK_GATE_D, 0); + I915_WRITE16(DEUC, 0); +} + +static void broadwater_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | + I965_RCC_CLOCK_GATE_DISABLE | + I965_RCPB_CLOCK_GATE_DISABLE | + I965_ISC_CLOCK_GATE_DISABLE | + I965_FBC_CLOCK_GATE_DISABLE); + I915_WRITE(RENCLK_GATE_D2, 0); +} + +static void gen3_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dstate = I915_READ(D_STATE); + + dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | + DSTATE_DOT_CLOCK_GATING; + I915_WRITE(D_STATE, dstate); + + if (IS_PINEVIEW(dev)) + I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); + + /* IIR "flip pending" means done if this bit is set */ + I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); +} + +static void i85x_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); +} + +static void i830_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); +} + +void intel_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->display.init_clock_gating(dev); +} + +/** + * We should only use the power well if we explicitly asked the hardware to + * enable it, so check if it's enabled and also check if we've requested it to + * be enabled. 
+ */ +bool intel_using_power_well(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_HASWELL(dev)) + return I915_READ(HSW_PWR_WELL_DRIVER) == + (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); + else + return true; +} + +void intel_set_power_well(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool is_enabled, enable_requested; + uint32_t tmp; + + if (!HAS_POWER_WELL(dev)) + return; + + if (!i915_disable_power_well && !enable) + return; + + tmp = I915_READ(HSW_PWR_WELL_DRIVER); + is_enabled = tmp & HSW_PWR_WELL_STATE; + enable_requested = tmp & HSW_PWR_WELL_ENABLE; + + if (enable) { + if (!enable_requested) + I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); + + if (!is_enabled) { + DRM_DEBUG_KMS("Enabling power well\n"); + if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & + HSW_PWR_WELL_STATE), 20)) + DRM_ERROR("Timeout enabling power well\n"); + } + } else { + if (enable_requested) { + I915_WRITE(HSW_PWR_WELL_DRIVER, 0); + DRM_DEBUG_KMS("Requesting to disable the power well\n"); + } + } +} + +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore. We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. + */ +void intel_init_power_well(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!HAS_POWER_WELL(dev)) + return; + + /* For now, we need the power well to be always enabled. */ + intel_set_power_well(dev, true); + + /* We're taking over the BIOS, so clear any requests made by it since + * the driver is in charge now. */ + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) + I915_WRITE(HSW_PWR_WELL_BIOS, 0); +} + +/* Set up chip specific power management-related functions */ +void intel_init_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_HAS_FBC(dev)) { + if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev)) { + dev_priv->display.fbc_enabled = g4x_fbc_enabled; + dev_priv->display.enable_fbc = g4x_enable_fbc; + dev_priv->display.disable_fbc = g4x_disable_fbc; + } else if (IS_CRESTLINE(dev)) { + dev_priv->display.fbc_enabled = i8xx_fbc_enabled; + dev_priv->display.enable_fbc = i8xx_enable_fbc; + dev_priv->display.disable_fbc = i8xx_disable_fbc; + } + /* 855GM needs testing */ + } + + /* For cxsr */ + if (IS_PINEVIEW(dev)) + i915_pineview_get_mem_freq(dev); + else if (IS_GEN5(dev)) + i915_ironlake_get_mem_freq(dev); + + /* For FIFO watermark updates */ + if (HAS_PCH_SPLIT(dev)) { + if (IS_GEN5(dev)) { + if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + dev_priv->display.update_wm = ironlake_update_wm; + else { + DRM_DEBUG_KMS("Failed to get proper latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ironlake_init_clock_gating; + } else if (IS_GEN6(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. 
" + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = gen6_init_clock_gating; + } else if (IS_IVYBRIDGE(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = ivybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + } else if (IS_HASWELL(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; + dev_priv->display.update_linetime_wm = haswell_update_linetime_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } + dev_priv->display.init_clock_gating = haswell_init_clock_gating; + } else + dev_priv->display.update_wm = NULL; + } else if (IS_VALLEYVIEW(dev)) { + dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.init_clock_gating = + valleyview_init_clock_gating; + } else if (IS_PINEVIEW(dev)) { + if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + DRM_INFO("failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? "3" : "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + pineview_disable_cxsr(dev); + dev_priv->display.update_wm = NULL; + } else + dev_priv->display.update_wm = pineview_update_wm; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_G4X(dev)) { + dev_priv->display.update_wm = g4x_update_wm; + dev_priv->display.init_clock_gating = g4x_init_clock_gating; + } else if (IS_GEN4(dev)) { + dev_priv->display.update_wm = i965_update_wm; + if (IS_CRESTLINE(dev)) + dev_priv->display.init_clock_gating = crestline_init_clock_gating; + else if (IS_BROADWATER(dev)) + dev_priv->display.init_clock_gating = broadwater_init_clock_gating; + } else if (IS_GEN3(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + } else if (IS_I865G(dev)) { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } else if (IS_I85X(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i85x_get_fifo_size; + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + } else { + dev_priv->display.update_wm = i830_update_wm; + dev_priv->display.init_clock_gating = i830_init_clock_gating; + if (IS_845G(dev)) + dev_priv->display.get_fifo_size = i845_get_fifo_size; + else + dev_priv->display.get_fifo_size = i830_get_fifo_size; + } +} + +static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +{ + u32 gt_thread_status_mask; + + if (IS_HASWELL(dev_priv->dev)) + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; + else + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; + + /* w/a for a sporadic read returning 0 by waiting for the GT + * thread to wake up. 
+ */ + if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + DRM_ERROR("GT thread status wait timed out\n"); +} + +static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ +} + +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); +} + +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) +{ + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_MT_ACK; + + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); + + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +/* + * Generally this is called implicitly by the register read function. However, + * if some sequence requires the GT to not power down then this function should + * be called at the beginning of the sequence followed by a call to + * gen6_gt_force_wake_put() at the end of the sequence. 
+ */ +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (dev_priv->forcewake_count++ == 0) + dev_priv->gt.force_wake_get(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); +} + +void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) +{ + u32 gtfifodbg; + gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); + if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, + "MMIO read or write has been dropped %x\n", gtfifodbg)) + I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); +} + +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + /* something from same cacheline, but !FORCEWAKE */ + POSTING_READ(ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + POSTING_READ(ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +/* + * see gen6_gt_force_wake_get() + */ +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (--dev_priv->forcewake_count == 0) + dev_priv->gt.force_wake_put(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); +} + +int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { + int loop = 500; + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { + udelay(10); + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + } + if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) + ++ret; + dev_priv->gt_fifo_count = fifo; + } + dev_priv->gt_fifo_count--; + + return ret; +} + +static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_VLV */ + POSTING_READ(FORCEWAKE_ACK_VLV); +} + +static void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); + + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & + FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); +} + +void intel_gt_sanitize(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + 
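/* Clear the single-threaded forcewake request; IVB and HSW also carry the MT variant */ +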
__gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } + + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + if (INTEL_INFO(dev)->gen >= 6) + intel_disable_gt_powersave(dev); +} + +void intel_gt_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + dev_priv->gt.force_wake_get = vlv_force_wake_get; + dev_priv->gt.force_wake_put = vlv_force_wake_put; + } else if (IS_HASWELL(dev)) { + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; + } else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. + */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + } + } else if (IS_GEN6(dev)) { + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + } +} + +void intel_pm_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, + intel_gen6_powersave_work); +} + +int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) +{ + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); + return -EAGAIN; + } + + I915_WRITE(GEN6_PCODE_DATA, *val); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) { + DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); + return -ETIMEDOUT; + } + + *val = I915_READ(GEN6_PCODE_DATA); + I915_WRITE(GEN6_PCODE_DATA, 0); + + return 0; +} + +int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) +{ + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { + DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); + return -EAGAIN; + } + + I915_WRITE(GEN6_PCODE_DATA, val); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) { + DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); + return -ETIMEDOUT; + } + + I915_WRITE(GEN6_PCODE_DATA, 0); + + return 0; +} + +static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode, + u8 addr, u32 *val) +{ + u32 cmd, devfn, port, be, bar; + + bar = 0; + be = 0xf; + port = IOSF_PORT_PUNIT; + devfn = PCI_DEVFN(2, 0); + + cmd = (devfn 
<< IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) | + (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) | + (bar << IOSF_BAR_SHIFT); + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) { + DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n", + opcode == PUNIT_OPCODE_REG_READ ? + "read" : "write"); + return -EAGAIN; + } + + I915_WRITE(VLV_IOSF_ADDR, addr); + if (opcode == PUNIT_OPCODE_REG_WRITE) + I915_WRITE(VLV_IOSF_DATA, *val); + I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); + + if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, + 500)) { + DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n", + opcode == PUNIT_OPCODE_REG_READ ? "read" : "write", + addr); + return -ETIMEDOUT; + } + + if (opcode == PUNIT_OPCODE_REG_READ) + *val = I915_READ(VLV_IOSF_DATA); + I915_WRITE(VLV_IOSF_DATA, 0); + + return 0; +} + +int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val) +{ + return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val); +} + +int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val) +{ + return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c new file mode 100644 index 0000000..48fe23e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -0,0 +1,1931 @@ +/* + * Copyright © 2008-2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Zou Nan hai + * Xiang Hai hao + * + */ + +#include +#include "i915_drv.h" +#include +#include "i915_trace.h" +#include "intel_drv.h" + +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. 
+ */ +struct pipe_control { + struct drm_i915_gem_object *obj; + volatile u32 *cpu_page; + u32 gtt_offset; +}; + +static inline int ring_space(struct intel_ring_buffer *ring) +{ + int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); + if (space < 0) + space += ring->size; + return space; +} + +static int +gen2_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + u32 cmd; + int ret; + + cmd = MI_FLUSH; + if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) + cmd |= MI_NO_WRITE_FLUSH; + + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static int +gen4_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + struct drm_device *dev = ring->dev; + u32 cmd; + int ret; + + /* + * read/write caches: + * + * I915_GEM_DOMAIN_RENDER is always invalidated, but is + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is + * also flushed at 2d versus 3d pipeline switches. + * + * read-only caches: + * + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if + * MI_READ_FLUSH is set, and is always flushed on 965. + * + * I915_GEM_DOMAIN_COMMAND may not exist? + * + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is + * invalidated when MI_EXE_FLUSH is set. + * + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is + * invalidated with every MI_FLUSH. + * + * TLBs: + * + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER + * are flushed at any MI_FLUSH. + */ + + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) + cmd &= ~MI_NO_WRITE_FLUSH; + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd |= MI_EXE_FLUSH; + + if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && + (IS_G4X(dev) || IS_GEN5(dev))) + cmd |= MI_INVALIDATE_ISP; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +/** + * Emits a PIPE_CONTROL with a non-zero post-sync operation, for + * implementing two workarounds on gen6. From section 1.4.7.1 + * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: + * + * [DevSNB-C+{W/A}] Before any depth stall flush (including those + * produced by non-pipelined state commands), software needs to first + * send a PIPE_CONTROL with no bits set except Post-Sync Operation != + * 0. + * + * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable + * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. + * + * And the workaround for these two requires this workaround first: + * + * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent + * BEFORE the pipe-control with a post-sync op and no write-cache + * flushes. + * + * And this last workaround is tricky because of the requirements on + * that bit. 
From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM + * volume 2 part 1: + * + * "1 of the following must also be set: + * - Render Target Cache Flush Enable ([12] of DW1) + * - Depth Cache Flush Enable ([0] of DW1) + * - Stall at Pixel Scoreboard ([1] of DW1) + * - Depth Stall ([13] of DW1) + * - Post-Sync Operation ([13] of DW1) + * - Notify Enable ([8] of DW1)" + * + * The cache flushes require the workaround flush that triggered this + * one, so we can't use it. Depth stall would trigger the same. + * Post-sync nonzero is what triggered this second workaround, so we + * can't use that one either. Notify enable is IRQs, which aren't + * really our business. That leaves only stall at scoreboard. + */ +static int +intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ + intel_ring_emit(ring, 0); /* low dword */ + intel_ring_emit(ring, 0); /* high dword */ + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static int +gen6_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, u32 flush_domains) +{ + u32 flags = 0; + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* Force SNB workarounds for PIPE_CONTROL flushes */ + ret = intel_emit_post_sync_nonzero_flush(ring); + if (ret) + return ret; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. + */ + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. + */ + flags |= PIPE_CONTROL_CS_STALL; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. 
+ */ + flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; + } + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int +gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int +gen7_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, u32 flush_domains) +{ + u32 flags = 0; + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* + * Ensure that any following seqno writes only happen when the render + * cache is indeed flushed. + * + * Workaround: 4th PIPE_CONTROL command (except the ones with only + * read-cache invalidate bits set) must have the CS_STALL bit set. We + * don't try to be clever and just set it unconditionally. + */ + flags |= PIPE_CONTROL_CS_STALL; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. + */ + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + + /* Workaround: we must issue a pipe_control with CS-stall bit + * set before a pipe_control command that has the state cache + * invalidate bit set. */ + gen7_render_ring_cs_stall_wa(ring); + } + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static void ring_write_tail(struct intel_ring_buffer *ring, + u32 value) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + I915_WRITE_TAIL(ring, value); +} + +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? + RING_ACTHD(ring->mmio_base) : ACTHD; + + return I915_READ(acthd_reg); +} + +static int init_ring_common(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj = ring->obj; + int ret = 0; + u32 head; + + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_get(dev_priv); + + /* Stop the ring if it's running. 
*/ + I915_WRITE_CTL(ring, 0); + I915_WRITE_HEAD(ring, 0); + ring->write_tail(ring, 0); + + head = I915_READ_HEAD(ring) & HEAD_ADDR; + + /* G45 ring initialization fails to reset head to zero */ + if (head != 0) { + DRM_DEBUG_KMS("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + + I915_WRITE_HEAD(ring, 0); + + if (I915_READ_HEAD(ring) & HEAD_ADDR) { + DRM_ERROR("failed to set %s head to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + } + } + + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); + I915_WRITE_CTL(ring, + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) + | RING_VALID); + + /* If the head is still not zero, the ring is dead */ + if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && + I915_READ_START(ring) == obj->gtt_offset && + (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { + DRM_ERROR("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + ret = -EIO; + goto out; + } + + if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) + i915_kernel_lost_context(ring->dev); + else { + ring->head = I915_READ_HEAD(ring); + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; + ring->space = ring_space(ring); + ring->last_retired_head = -1; + } + +out: + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_put(dev_priv); + + return ret; +} + +static int +init_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc; + struct drm_i915_gem_object *obj; + int ret; + + if (ring->private) + return 0; + + pc = kmalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + obj = i915_gem_alloc_object(ring->dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + + ret = i915_gem_object_pin(obj, 4096, true, false); + if (ret) + goto err_unref; + + pc->gtt_offset = obj->gtt_offset; + pc->cpu_page = kmap(sg_page(obj->pages->sgl)); + if (pc->cpu_page == NULL) + goto err_unpin; + + DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", + ring->name, pc->gtt_offset); + + pc->obj = obj; + ring->private = pc; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + kfree(pc); + return ret; +} + +static void +cleanup_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + struct drm_i915_gem_object *obj; + + obj = pc->obj; + + kunmap(sg_page(obj->pages->sgl)); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + kfree(pc); +} + +static int init_render_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = init_ring_common(ring); + + if (INTEL_INFO(dev)->gen > 3) + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); + + /* We need to disable the AsyncFlip performance optimisations in order + * to use MI_WAIT_FOR_EVENT within the CS. 
It should already be + * programmed to '1' on all products. + */ + if (INTEL_INFO(dev)->gen >= 6) + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + + /* Required for the hardware to program scanline values for waiting */ + if (INTEL_INFO(dev)->gen == 6) + I915_WRITE(GFX_MODE, + _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); + + if (IS_GEN7(dev)) + I915_WRITE(GFX_MODE_GEN7, + _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | + _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); + + if (INTEL_INFO(dev)->gen >= 5) { + ret = init_pipe_control(ring); + if (ret) + return ret; + } + + if (IS_GEN6(dev)) { + /* From the Sandybridge PRM, volume 1 part 3, page 24: + * "If this bit is set, STCunit will have LRA as replacement + * policy. [...] This bit must be reset. LRA replacement + * policy is not supported." + */ + I915_WRITE(CACHE_MODE_0, + _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); + + /* This is not explicitly set for GEN6, so read the register. + * see intel_ring_mi_set_context() for why we care. + * TODO: consider explicitly setting the bit for GEN5 + */ + ring->itlb_before_ctx_switch = + !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); + } + + if (INTEL_INFO(dev)->gen >= 6) + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); + + if (HAS_L3_GPU_CACHE(dev)) + I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + + return ret; +} + +static void render_ring_cleanup(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + + if (!ring->private) + return; + + if (HAS_BROKEN_CS_TLB(dev)) + drm_gem_object_unreference(to_gem_object(ring->private)); + + if (INTEL_INFO(dev)->gen >= 5) + cleanup_pipe_control(ring); + + ring->private = NULL; +} + +static void +update_mboxes(struct intel_ring_buffer *ring, + u32 mmio_offset) +{ + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, mmio_offset); + intel_ring_emit(ring, ring->outstanding_lazy_request); +} + +/** + * gen6_add_request - Update the semaphore mailbox registers + * + * @ring - ring that is adding a request + * @seqno - return seqno stuck into the ring + * + * Update the mailbox registers in the *other* rings with the current seqno. + * This acts like a signal in the canonical semaphore. + */ +static int +gen6_add_request(struct intel_ring_buffer *ring) +{ + u32 mbox1_reg; + u32 mbox2_reg; + int ret; + + ret = intel_ring_begin(ring, 10); + if (ret) + return ret; + + mbox1_reg = ring->signal_mbox[0]; + mbox2_reg = ring->signal_mbox[1]; + + update_mboxes(ring, mbox1_reg); + update_mboxes(ring, mbox2_reg); + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, ring->outstanding_lazy_request); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + return 0; +} + +static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, + u32 seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->last_seqno < seqno; +} + +/** + * intel_ring_sync - sync the waiter to the signaller on seqno + * + * @waiter - ring that is waiting + * @signaller - ring which has, or will signal + * @seqno - seqno which the waiter will block on + */ +static int +gen6_ring_sync(struct intel_ring_buffer *waiter, + struct intel_ring_buffer *signaller, + u32 seqno) +{ + int ret; + u32 dw1 = MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_COMPARE | + MI_SEMAPHORE_REGISTER; + + /* Throughout all of the GEM code, seqno passed implies our current + * seqno is >= the last seqno executed. 
However for hardware the + * comparison is strictly greater than. + */ + seqno -= 1; + + WARN_ON(signaller->semaphore_register[waiter->id] == + MI_SEMAPHORE_SYNC_INVALID); + + ret = intel_ring_begin(waiter, 4); + if (ret) + return ret; + + /* If seqno wrap happened, omit the wait with no-ops */ + if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + intel_ring_emit(waiter, + dw1 | + signaller->semaphore_register[waiter->id]); + intel_ring_emit(waiter, seqno); + intel_ring_emit(waiter, 0); + intel_ring_emit(waiter, MI_NOOP); + } else { + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + intel_ring_emit(waiter, MI_NOOP); + } + intel_ring_advance(waiter); + + return 0; +} + +#define PIPE_CONTROL_FLUSH(ring__, addr__) \ +do { \ + intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ + PIPE_CONTROL_DEPTH_STALL); \ + intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ + intel_ring_emit(ring__, 0); \ + intel_ring_emit(ring__, 0); \ +} while (0) + +static int +pc_render_add_request(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently + * incoherent with writes to memory, i.e. completely fubar, + * so we need to use PIPE_NOTIFY instead. + * + * However, we also need to workaround the qword write + * incoherence by flushing the 6 PIPE_NOTIFY buffers out to + * memory before requesting an interrupt. + */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WRITE_FLUSH | + PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, ring->outstanding_lazy_request); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WRITE_FLUSH | + PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, ring->outstanding_lazy_request); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static u32 +gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +{ + /* Workaround to force correct ordering between irq and seqno writes on + * ivb (and maybe also on snb) by reading from a CS register (like + * ACTHD) before reading the status page. 
*/ + if (!lazy_coherency) + intel_ring_get_active_head(ring); + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static u32 +ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +{ + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static void +ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); +} + +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +{ + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; +} + +static void +pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + struct pipe_control *pc = ring->private; + pc->cpu_page[0] = seqno; +} + +static bool +gen5_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +gen5_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + dev_priv->gt_irq_mask |= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + +static bool +i9xx_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +i9xx_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + dev_priv->irq_mask |= ring->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ(IMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + +static bool +i8xx_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~ring->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(IMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +i8xx_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + dev_priv->irq_mask |= ring->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(IMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + 
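+/*
+ * The get_irq/put_irq implementations above all share the same shape: a
+ * refcount protected by dev_priv->irq_lock, with the ring's user-interrupt
+ * mask opened only on the 0->1 transition and closed again on the 1->0
+ * transition.  A caller that sleeps for a seqno is expected to bracket its
+ * wait roughly like the sketch below, where done_condition stands in for
+ * whatever the caller is actually waiting on (the real wait-side code
+ * lives in i915_gem.c and is not part of this file):
+ *
+ *	if (ring->irq_get(ring)) {
+ *		wait_event(ring->irq_queue, done_condition);
+ *		ring->irq_put(ring);
+ *	}
+ */
+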
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 mmio = 0; + + /* The ring status page addresses are no longer next to the rest of + * the ring registers as of gen7. + */ + if (IS_GEN7(dev)) { + switch (ring->id) { + case RCS: + mmio = RENDER_HWS_PGA_GEN7; + break; + case BCS: + mmio = BLT_HWS_PGA_GEN7; + break; + case VCS: + mmio = BSD_HWS_PGA_GEN7; + break; + } + } else if (IS_GEN6(ring->dev)) { + mmio = RING_HWS_PGA_GEN6(ring->mmio_base); + } else { + mmio = RING_HWS_PGA(ring->mmio_base); + } + + I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); + POSTING_READ(mmio); + + /* Flush the TLB for this page */ + if (INTEL_INFO(dev)->gen >= 6) { + u32 reg = RING_INSTPM(ring->mmio_base); + I915_WRITE(reg, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + ring->name); + } +} + +static int +bsd_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; +} + +static int +i9xx_add_request(struct intel_ring_buffer *ring) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, ring->outstanding_lazy_request); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + return 0; +} + +static bool +gen6_ring_get_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + /* It looks like we need to prevent the gt from suspending while waiting + * for an notifiy irq, otherwise irqs seem to get lost on at least the + * blt/bsd rings on ivb. 
*/ + gen6_gt_force_wake_get(dev_priv); + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | + GEN6_RENDER_L3_PARITY_ERROR)); + else + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void +gen6_ring_put_irq(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + else + I915_WRITE_IMR(ring, ~0); + dev_priv->gt_irq_mask |= ring->irq_enable_mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + gen6_gt_force_wake_put(dev_priv); +} + +static int +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 length, + unsigned flags) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | + MI_BATCH_GTT | + (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); + + return 0; +} + +/* Just userspace ABI convention to limit the wa batch bo to a resonable size */ +#define I830_BATCH_LIMIT (256*1024) +static int +i830_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + int ret; + + if (flags & I915_DISPATCH_PINNED) { + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } else { + struct drm_i915_gem_object *obj = ring->private; + u32 cs_offset = obj->gtt_offset; + + if (len > I830_BATCH_LIMIT) + return -ENOSPC; + + ret = intel_ring_begin(ring, 9+3); + if (ret) + return ret; + /* Blit the batch (which has now all relocs applied) to the stable batch + * scratch bo area (so that the CS never stumbles over its tlb + * invalidation bug) ... */ + intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | + XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 4096); + intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_FLUSH); + + /* ... and execute it. */ + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, cs_offset + len - 8); + intel_ring_advance(ring); + } + + return 0; +} + +static int +i915_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); + intel_ring_advance(ring); + + return 0; +} + +static void cleanup_status_page(struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + + obj = ring->status_page.obj; + if (obj == NULL) + return; + + kunmap(sg_page(obj->pages->sgl)); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + ring->status_page.obj = NULL; +} + +static int init_status_page(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_gem_object *obj; + int ret; + + obj = i915_gem_alloc_object(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate status page\n"); + ret = -ENOMEM; + goto err; + } + + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + + ret = i915_gem_object_pin(obj, 4096, true, false); + if (ret != 0) { + goto err_unref; + } + + ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); + if (ring->status_page.page_addr == NULL) { + ret = -ENOMEM; + goto err_unpin; + } + ring->status_page.obj = obj; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + + intel_ring_setup_status_page(ring); + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + ring->name, ring->status_page.gfx_addr); + + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + return ret; +} + +static int init_phys_hws_pga(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u32 addr; + + if (!dev_priv->status_page_dmah) { + dev_priv->status_page_dmah = + drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; + } + + addr = dev_priv->status_page_dmah->busaddr; + if (INTEL_INFO(ring->dev)->gen >= 4) + addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); + + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + + return 0; +} + +static int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + ring->size = 32 * PAGE_SIZE; + memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); + + init_waitqueue_head(&ring->irq_queue); + + if (I915_NEED_GFX_HWS(dev)) { + ret = init_status_page(ring); + if (ret) + return ret; + } else { + BUG_ON(ring->id != RCS); + ret = init_phys_hws_pga(ring); + if (ret) + return ret; + } + + obj = NULL; + if (!HAS_LLC(dev)) + obj = i915_gem_object_create_stolen(dev, ring->size); + if (obj == NULL) + obj = i915_gem_alloc_object(dev, ring->size); + if (obj == NULL) { + DRM_ERROR("Failed to allocate ringbuffer\n"); + ret = -ENOMEM; + goto err_hws; + } + + ring->obj = obj; + + ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); + if (ret) + goto err_unref; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto err_unpin; + + ring->virtual_start = + ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + ring->size); + if (ring->virtual_start == NULL) { + DRM_ERROR("Failed to map ringbuffer.\n"); + ret = -EINVAL; + goto err_unpin; + } + + ret = ring->init(ring); + if (ret) + goto err_unmap; + + /* Workaround an erratum on the i830 which causes a hang if + * the TAIL pointer points to within the last 2 cachelines + * of the buffer. 
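+ * That is why effective_size is trimmed by 128 bytes for i830/845G just
+ * below, so a wrapping tail never lands inside that region.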
+ */ + ring->effective_size = ring->size; + if (IS_I830(ring->dev) || IS_845G(ring->dev)) + ring->effective_size -= 128; + + return 0; + +err_unmap: + iounmap(ring->virtual_start); +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); + ring->obj = NULL; +err_hws: + cleanup_status_page(ring); + return ret; +} + +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv; + int ret; + + if (ring->obj == NULL) + return; + + /* Disable the ring buffer. The ring must be idle at this point */ + dev_priv = ring->dev->dev_private; + ret = intel_ring_idle(ring); + if (ret) + DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", + ring->name, ret); + + I915_WRITE_CTL(ring, 0); + + iounmap(ring->virtual_start); + + i915_gem_object_unpin(ring->obj); + drm_gem_object_unreference(&ring->obj->base); + ring->obj = NULL; + + if (ring->cleanup) + ring->cleanup(ring); + + cleanup_status_page(ring); +} + +static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + int ret; + + ret = i915_wait_seqno(ring, seqno); + if (!ret) + i915_gem_retire_requests_ring(ring); + + return ret; +} + +static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) +{ + struct drm_i915_gem_request *request; + u32 seqno = 0; + int ret; + + i915_gem_retire_requests_ring(ring); + + if (ring->last_retired_head != -1) { + ring->head = ring->last_retired_head; + ring->last_retired_head = -1; + ring->space = ring_space(ring); + if (ring->space >= n) + return 0; + } + + list_for_each_entry(request, &ring->request_list, list) { + int space; + + if (request->tail == -1) + continue; + + space = request->tail - (ring->tail + I915_RING_FREE_SPACE); + if (space < 0) + space += ring->size; + if (space >= n) { + seqno = request->seqno; + break; + } + + /* Consume this request in case we need more space than + * is available and so need to prevent a race between + * updating last_retired_head and direct reads of + * I915_RING_HEAD. It also provides a nice sanity check. + */ + request->tail = -1; + } + + if (seqno == 0) + return -ENOSPC; + + ret = intel_ring_wait_seqno(ring, seqno); + if (ret) + return ret; + + if (WARN_ON(ring->last_retired_head == -1)) + return -ENOSPC; + + ring->head = ring->last_retired_head; + ring->last_retired_head = -1; + ring->space = ring_space(ring); + if (WARN_ON(ring->space < n)) + return -ENOSPC; + + return 0; +} + +static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long end; + int ret; + + ret = intel_ring_wait_request(ring, n); + if (ret != -ENOSPC) + return ret; + + trace_i915_ring_wait_begin(ring); + /* With GEM the hangcheck timer should kick us out of the loop, + * leaving it early runs the risk of corrupting GEM state (due + * to running on almost untested codepaths). But on resume + * timers don't work yet, so prevent a complete hang in that + * case by choosing an insanely large timeout. 
*/ + end = jiffies + 60 * HZ; + + do { + ring->head = I915_READ_HEAD(ring); + ring->space = ring_space(ring); + if (ring->space >= n) { + trace_i915_ring_wait_end(ring); + return 0; + } + + if (dev->primary->master) { + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + } + + msleep(1); + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); + if (ret) + return ret; + } while (!time_after(jiffies, end)); + trace_i915_ring_wait_end(ring); + return -EBUSY; +} + +static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) +{ + uint32_t __iomem *virt; + int rem = ring->size - ring->tail; + + if (ring->space < rem) { + int ret = ring_wait_for_space(ring, rem); + if (ret) + return ret; + } + + virt = ring->virtual_start + ring->tail; + rem /= 4; + while (rem--) + iowrite32(MI_NOOP, virt++); + + ring->tail = 0; + ring->space = ring_space(ring); + + return 0; +} + +int intel_ring_idle(struct intel_ring_buffer *ring) +{ + u32 seqno; + int ret; + + /* We need to add any requests required to flush the objects and ring */ + if (ring->outstanding_lazy_request) { + ret = i915_add_request(ring, NULL, NULL); + if (ret) + return ret; + } + + /* Wait upon the last request to be completed */ + if (list_empty(&ring->request_list)) + return 0; + + seqno = list_entry(ring->request_list.prev, + struct drm_i915_gem_request, + list)->seqno; + + return i915_wait_seqno(ring, seqno); +} + +static int +intel_ring_alloc_seqno(struct intel_ring_buffer *ring) +{ + if (ring->outstanding_lazy_request) + return 0; + + return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); +} + +static int __intel_ring_begin(struct intel_ring_buffer *ring, + int bytes) +{ + int ret; + + if (unlikely(ring->tail + bytes > ring->effective_size)) { + ret = intel_wrap_ring_buffer(ring); + if (unlikely(ret)) + return ret; + } + + if (unlikely(ring->space < bytes)) { + ret = ring_wait_for_space(ring, bytes); + if (unlikely(ret)) + return ret; + } + + ring->space -= bytes; + return 0; +} + +int intel_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + int ret; + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); + if (ret) + return ret; + + /* Preallocate the olr before touching the ring */ + ret = intel_ring_alloc_seqno(ring); + if (ret) + return ret; + + return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); +} + +void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + BUG_ON(ring->outstanding_lazy_request); + + if (INTEL_INFO(ring->dev)->gen >= 6) { + I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); + I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); + } + + ring->set_seqno(ring, seqno); +} + +void intel_ring_advance(struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + ring->tail &= ring->size - 1; + if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) + return; + ring->write_tail(ring, ring->tail); +} + + +static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, + u32 value) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + + /* Every tail move must follow the sequence below */ + + /* Disable notification that the ring is IDLE. The GT + * will then assume that it is busy and bring it out of rc6. 
+ */ + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + /* Clear the context id. Here be magic! */ + I915_WRITE64(GEN6_BSD_RNCID, 0x0); + + /* Wait for the ring not to be idle, i.e. for it to wake up. */ + if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & + GEN6_BSD_SLEEP_INDICATOR) == 0, + 50)) + DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); + + /* Now that the ring is fully powered up, update the tail */ + I915_WRITE_TAIL(ring, value); + POSTING_READ(RING_TAIL(ring->mmio_base)); + + /* Let the ring send IDLE messages to the GT again, + * and so let it sleep to conserve power when idle. + */ + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); +} + +static int gen6_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate, u32 flush) +{ + uint32_t cmd; + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + cmd = MI_FLUSH_DW; + /* + * Bspec vol 1c.5 - video engine command streamer: + * "If ENABLED, all TLBs will be invalidated once the flush + * operation is complete. This bit is only valid when the + * Post-Sync Operation field is a value of 1h or 3h." + */ + if (invalidate & I915_GEM_GPU_DOMAINS) + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | + MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; +} + +static int +hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | + (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); + /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(ring, offset); + intel_ring_advance(ring); + + return 0; +} + +static int +gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + u32 offset, u32 len, + unsigned flags) +{ + int ret; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | + (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); + /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(ring, offset); + intel_ring_advance(ring); + + return 0; +} + +/* Blitter support (SandyBridge+) */ + +static int blt_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate, u32 flush) +{ + uint32_t cmd; + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + cmd = MI_FLUSH_DW; + /* + * Bspec vol 1c.3 - blitter engine command streamer: + * "If ENABLED, all TLBs will be invalidated once the flush + * operation is complete. This bit is only valid when the + * Post-Sync Operation field is a value of 1h or 3h." 
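+ *
+ * That is why the invalidate path below also sets MI_FLUSH_DW_STORE_INDEX
+ * and MI_FLUSH_DW_OP_STOREDW, giving the flush a post-sync dword write to
+ * the hws scratch slot so that the TLB invalidation bit takes effect.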
+ */ + if (invalidate & I915_GEM_DOMAIN_RENDER) + cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | + MI_FLUSH_DW_OP_STOREDW; + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; +} + +int intel_init_render_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + + if (INTEL_INFO(dev)->gen >= 6) { + ring->add_request = gen6_add_request; + ring->flush = gen7_render_ring_flush; + if (INTEL_INFO(dev)->gen == 6) + ring->flush = gen6_render_ring_flush; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; + ring->signal_mbox[0] = GEN6_VRSYNC; + ring->signal_mbox[1] = GEN6_BRSYNC; + } else if (IS_GEN5(dev)) { + ring->add_request = pc_render_add_request; + ring->flush = gen4_render_ring_flush; + ring->get_seqno = pc_render_get_seqno; + ring->set_seqno = pc_render_set_seqno; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; + ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; + } else { + ring->add_request = i9xx_add_request; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; + ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; + if (IS_GEN2(dev)) { + ring->irq_get = i8xx_ring_get_irq; + ring->irq_put = i8xx_ring_put_irq; + } else { + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->irq_enable_mask = I915_USER_INTERRUPT; + } + ring->write_tail = ring_write_tail; + if (IS_HASWELL(dev)) + ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; + else if (INTEL_INFO(dev)->gen >= 6) + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; + + /* Workaround batchbuffer to combat CS tlb bug. 
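+ * i830_dispatch_execbuffer() blits every batch that is not dispatched
+ * pinned into this scratch bo and executes the copy instead, so the CS
+ * never stumbles over its tlb invalidation bug (see the comment in that
+ * function).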
*/ + if (HAS_BROKEN_CS_TLB(dev)) { + struct drm_i915_gem_object *obj; + int ret; + + obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); + if (obj == NULL) { + DRM_ERROR("Failed to allocate batch bo\n"); + return -ENOMEM; + } + + ret = i915_gem_object_pin(obj, 0, true, false); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to ping batch bo\n"); + return ret; + } + + ring->private = obj; + } + + return intel_init_ring_buffer(dev, ring); +} + +int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; + int ret; + + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + + if (INTEL_INFO(dev)->gen >= 6) { + /* non-kms not supported on gen6+ */ + return -ENODEV; + } + + /* Note: gem is not supported on gen5/ilk without kms (the corresponding + * gem_init ioctl returns with -ENODEV). Hence we do not need to set up + * the special gen5 functions. */ + ring->add_request = i9xx_add_request; + if (INTEL_INFO(dev)->gen < 4) + ring->flush = gen2_render_ring_flush; + else + ring->flush = gen4_render_ring_flush; + ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; + if (IS_GEN2(dev)) { + ring->irq_get = i8xx_ring_get_irq; + ring->irq_put = i8xx_ring_put_irq; + } else { + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->irq_enable_mask = I915_USER_INTERRUPT; + ring->write_tail = ring_write_tail; + if (INTEL_INFO(dev)->gen >= 4) + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev) || IS_845G(dev)) + ring->dispatch_execbuffer = i830_dispatch_execbuffer; + else + ring->dispatch_execbuffer = i915_dispatch_execbuffer; + ring->init = init_render_ring; + ring->cleanup = render_ring_cleanup; + + ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + + ring->size = size; + ring->effective_size = ring->size; + if (IS_I830(ring->dev) || IS_845G(ring->dev)) + ring->effective_size -= 128; + + ring->virtual_start = ioremap_wc(start, size); + if (ring->virtual_start == NULL) { + DRM_ERROR("can not ioremap virtual address for" + " ring buffer\n"); + return -ENOMEM; + } + + if (!I915_NEED_GFX_HWS(dev)) { + ret = init_phys_hws_pga(ring); + if (ret) + return ret; + } + + return 0; +} + +int intel_init_bsd_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; + + ring->name = "bsd ring"; + ring->id = VCS; + + ring->write_tail = ring_write_tail; + if (IS_GEN6(dev) || IS_GEN7(dev)) { + ring->mmio_base = GEN6_BSD_RING_BASE; + /* gen6 bsd needs a special wa for tail updates */ + if (IS_GEN6(dev)) + ring->write_tail = gen6_bsd_ring_write_tail; + ring->flush = gen6_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; + ring->signal_mbox[0] = GEN6_RVSYNC; + ring->signal_mbox[1] = GEN6_BVSYNC; + } else { + ring->mmio_base = BSD_RING_BASE; + ring->flush = bsd_ring_flush; + 
ring->add_request = i9xx_add_request; + ring->get_seqno = ring_get_seqno; + ring->set_seqno = ring_set_seqno; + if (IS_GEN5(dev)) { + ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; + ring->irq_get = gen5_ring_get_irq; + ring->irq_put = gen5_ring_put_irq; + } else { + ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; + ring->irq_get = i9xx_ring_get_irq; + ring->irq_put = i9xx_ring_put_irq; + } + ring->dispatch_execbuffer = i965_dispatch_execbuffer; + } + ring->init = init_ring_common; + + return intel_init_ring_buffer(dev, ring); +} + +int intel_init_blt_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + + ring->name = "blitter ring"; + ring->id = BCS; + + ring->mmio_base = BLT_RING_BASE; + ring->write_tail = ring_write_tail; + ring->flush = blt_ring_flush; + ring->add_request = gen6_add_request; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; + ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + ring->sync_to = gen6_ring_sync; + ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; + ring->signal_mbox[0] = GEN6_RBSYNC; + ring->signal_mbox[1] = GEN6_VBSYNC; + ring->init = init_ring_common; + + return intel_init_ring_buffer(dev, ring); +} + +int +intel_ring_flush_all_caches(struct intel_ring_buffer *ring) +{ + int ret; + + if (!ring->gpu_caches_dirty) + return 0; + + ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); + + ring->gpu_caches_dirty = false; + return 0; +} + +int +intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) +{ + uint32_t flush_domains; + int ret; + + flush_domains = 0; + if (ring->gpu_caches_dirty) + flush_domains = I915_GEM_GPU_DOMAINS; + + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); + if (ret) + return ret; + + trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); + + ring->gpu_caches_dirty = false; + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h new file mode 100644 index 0000000..d66208c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -0,0 +1,251 @@ +#ifndef _INTEL_RINGBUFFER_H_ +#define _INTEL_RINGBUFFER_H_ + +/* + * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" + * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" + * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" + * + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same + * cacheline, the Head Pointer must not be greater than the Tail + * Pointer." 
+ */ +#define I915_RING_FREE_SPACE 64 + +struct intel_hw_status_page { + u32 *page_addr; + unsigned int gfx_addr; + struct drm_i915_gem_object *obj; +}; + +#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) +#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) + +#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) +#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) + +#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) +#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) + +#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) +#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) + +#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) +#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) + +#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) +#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) +#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) + +struct intel_ring_buffer { + const char *name; + enum intel_ring_id { + RCS = 0x0, + VCS, + BCS, + } id; +#define I915_NUM_RINGS 3 + u32 mmio_base; + void __iomem *virtual_start; + struct drm_device *dev; + struct drm_i915_gem_object *obj; + + u32 head; + u32 tail; + int space; + int size; + int effective_size; + struct intel_hw_status_page status_page; + + /** We track the position of the requests in the ring buffer, and + * when each is retired we increment last_retired_head as the GPU + * must have finished processing the request and so we know we + * can advance the ringbuffer up to that position. + * + * last_retired_head is set to -1 after the value is consumed so + * we can detect new retirements. + */ + u32 last_retired_head; + + u32 irq_refcount; /* protected by dev_priv->irq_lock */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + u32 trace_irq_seqno; + u32 sync_seqno[I915_NUM_RINGS-1]; + bool __must_check (*irq_get)(struct intel_ring_buffer *ring); + void (*irq_put)(struct intel_ring_buffer *ring); + + int (*init)(struct intel_ring_buffer *ring); + + void (*write_tail)(struct intel_ring_buffer *ring, + u32 value); + int __must_check (*flush)(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains); + int (*add_request)(struct intel_ring_buffer *ring); + /* Some chipsets are not quite as coherent as advertised and need + * an expensive kick to force a true read of the up-to-date seqno. + * However, the up-to-date seqno is not always required and the last + * seen value is good enough. Note that the seqno will always be + * monotonic, even if not coherent. + */ + u32 (*get_seqno)(struct intel_ring_buffer *ring, + bool lazy_coherency); + void (*set_seqno)(struct intel_ring_buffer *ring, + u32 seqno); + int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, + u32 offset, u32 length, + unsigned flags); +#define I915_DISPATCH_SECURE 0x1 +#define I915_DISPATCH_PINNED 0x2 + void (*cleanup)(struct intel_ring_buffer *ring); + int (*sync_to)(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno); + + u32 semaphore_register[3]; /*our mbox written by others */ + u32 signal_mbox[2]; /* mboxes this ring signals to */ + /** + * List of objects currently involved in rendering from the + * ringbuffer. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. 
last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head request_list; + + /** + * Do we have some not yet emitted requests outstanding? + */ + u32 outstanding_lazy_request; + bool gpu_caches_dirty; + + wait_queue_head_t irq_queue; + + /** + * Do an explicit TLB flush before MI_SET_CONTEXT + */ + bool itlb_before_ctx_switch; + struct i915_hw_context *default_context; + struct drm_i915_gem_object *last_context_obj; + + void *private; +}; + +static inline bool +intel_ring_initialized(struct intel_ring_buffer *ring) +{ + return ring->obj != NULL; +} + +static inline unsigned +intel_ring_flag(struct intel_ring_buffer *ring) +{ + return 1 << ring->id; +} + +static inline u32 +intel_ring_sync_index(struct intel_ring_buffer *ring, + struct intel_ring_buffer *other) +{ + int idx; + + /* + * cs -> 0 = vcs, 1 = bcs + * vcs -> 0 = bcs, 1 = cs, + * bcs -> 0 = cs, 1 = vcs. + */ + + idx = (other - ring) - 1; + if (idx < 0) + idx += I915_NUM_RINGS; + + return idx; +} + +static inline u32 +intel_read_status_page(struct intel_ring_buffer *ring, + int reg) +{ + /* Ensure that the compiler doesn't optimize away the load. */ + barrier(); + return ring->status_page.page_addr[reg]; +} + +static inline void +intel_write_status_page(struct intel_ring_buffer *ring, + int reg, u32 value) +{ + ring->status_page.page_addr[reg] = value; +} + +/** + * Reads a dword out of the status page, which is written to from the command + * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or + * MI_STORE_DATA_IMM. + * + * The following dwords have a reserved meaning: + * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. + * 0x04: ring 0 head pointer + * 0x05: ring 1 head pointer (915-class) + * 0x06: ring 2 head pointer (915-class) + * 0x10-0x1b: Context status DWords (GM45) + * 0x1f: Last written status offset. (GM45) + * + * The area from dword 0x20 to 0x3ff is available for driver usage. 
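+ *
+ * Of that area this driver uses dword 0x20 (I915_GEM_HWS_INDEX) for the
+ * request seqno written by the add_request functions and dword 0x30
+ * (I915_GEM_HWS_SCRATCH_INDEX) as the scratch slot targeted by the
+ * MI_FLUSH_DW post-sync writes; both are defined just below.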
+ */ +#define I915_GEM_HWS_INDEX 0x20 +#define I915_GEM_HWS_SCRATCH_INDEX 0x30 +#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) + +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); + +int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); +static inline void intel_ring_emit(struct intel_ring_buffer *ring, + u32 data) +{ + iowrite32(data, ring->virtual_start + ring->tail); + ring->tail += 4; +} +void intel_ring_advance(struct intel_ring_buffer *ring); +int __must_check intel_ring_idle(struct intel_ring_buffer *ring); +void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); +int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); +int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); + +int intel_init_render_ring_buffer(struct drm_device *dev); +int intel_init_bsd_ring_buffer(struct drm_device *dev); +int intel_init_blt_ring_buffer(struct drm_device *dev); + +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); +void intel_ring_setup_status_page(struct intel_ring_buffer *ring); + +static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) +{ + return ring->tail; +} + +static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) +{ + BUG_ON(ring->outstanding_lazy_request == 0); + return ring->outstanding_lazy_request; +} + +static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) +{ + if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) + ring->trace_irq_seqno = seqno; +} + +/* DRI warts */ +int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); + +#endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c new file mode 100644 index 0000000..d4ea6c2 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -0,0 +1,2871 @@ +/* + * Copyright 2006 Dave Airlie + * Copyright © 2006-2007 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + */ +#include +#include +#include +#include +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" +#include "intel_sdvo_regs.h" + +#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) +#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) +#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) +#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0) + +#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ + SDVO_TV_MASK) + +#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) +#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) +#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) +#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) +#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) + + +static const char *tv_format_names[] = { + "NTSC_M" , "NTSC_J" , "NTSC_443", + "PAL_B" , "PAL_D" , "PAL_G" , + "PAL_H" , "PAL_I" , "PAL_M" , + "PAL_N" , "PAL_NC" , "PAL_60" , + "SECAM_B" , "SECAM_D" , "SECAM_G" , + "SECAM_K" , "SECAM_K1", "SECAM_L" , + "SECAM_60" +}; + +#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) + +struct intel_sdvo { + struct intel_encoder base; + + struct i2c_adapter *i2c; + u8 slave_addr; + + struct i2c_adapter ddc; + + /* Register for the SDVO device: SDVOB or SDVOC */ + uint32_t sdvo_reg; + + /* Active outputs controlled by this SDVO output */ + uint16_t controlled_output; + + /* + * Capabilities of the SDVO device returned by + * i830_sdvo_get_capabilities() + */ + struct intel_sdvo_caps caps; + + /* Pixel clock limitations reported by the SDVO device, in kHz */ + int pixel_clock_min, pixel_clock_max; + + /* + * For multiple function SDVO device, + * this is for current attached outputs. + */ + uint16_t attached_output; + + /* + * Hotplug activation bits for this device + */ + uint16_t hotplug_active; + + /** + * This is used to select the color range of RBG outputs in HDMI mode. + * It is only valid when using TMDS encoding and 8 bit per color mode. + */ + uint32_t color_range; + bool color_range_auto; + + /** + * This is set if we're going to treat the device as TV-out. + * + * While we have these nice friendly flags for output types that ought + * to decide this for us, the S-Video output on our HDMI+S-Video card + * shows up as RGB1 (VGA). + */ + bool is_tv; + + /* On different gens SDVOB is at different places. */ + bool is_sdvob; + + /* This is for current tv format name */ + int tv_format_index; + + /** + * This is set if we treat the device as HDMI, instead of DVI. + */ + bool is_hdmi; + bool has_hdmi_monitor; + bool has_hdmi_audio; + bool rgb_quant_range_selectable; + + /** + * This is set if we detect output of sdvo device as LVDS and + * have a valid fixed mode to use with the panel. 
+ */ + bool is_lvds; + + /** + * This is sdvo fixed pannel mode pointer + */ + struct drm_display_mode *sdvo_lvds_fixed_mode; + + /* DDC bus used by this SDVO encoder */ + uint8_t ddc_bus; + + /* + * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd + */ + uint8_t dtd_sdvo_flags; +}; + +struct intel_sdvo_connector { + struct intel_connector base; + + /* Mark the type of connector */ + uint16_t output_flag; + + enum hdmi_force_audio force_audio; + + /* This contains all current supported TV format */ + u8 tv_format_supported[TV_FORMAT_NUM]; + int format_supported_num; + struct drm_property *tv_format; + + /* add the property for the SDVO-TV */ + struct drm_property *left; + struct drm_property *right; + struct drm_property *top; + struct drm_property *bottom; + struct drm_property *hpos; + struct drm_property *vpos; + struct drm_property *contrast; + struct drm_property *saturation; + struct drm_property *hue; + struct drm_property *sharpness; + struct drm_property *flicker_filter; + struct drm_property *flicker_filter_adaptive; + struct drm_property *flicker_filter_2d; + struct drm_property *tv_chroma_filter; + struct drm_property *tv_luma_filter; + struct drm_property *dot_crawl; + + /* add the property for the SDVO-TV/LVDS */ + struct drm_property *brightness; + + /* Add variable to record current setting for the above property */ + u32 left_margin, right_margin, top_margin, bottom_margin; + + /* this is to get the range of margin.*/ + u32 max_hscan, max_vscan; + u32 max_hpos, cur_hpos; + u32 max_vpos, cur_vpos; + u32 cur_brightness, max_brightness; + u32 cur_contrast, max_contrast; + u32 cur_saturation, max_saturation; + u32 cur_hue, max_hue; + u32 cur_sharpness, max_sharpness; + u32 cur_flicker_filter, max_flicker_filter; + u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; + u32 cur_flicker_filter_2d, max_flicker_filter_2d; + u32 cur_tv_chroma_filter, max_tv_chroma_filter; + u32 cur_tv_luma_filter, max_tv_luma_filter; + u32 cur_dot_crawl, max_dot_crawl; +}; + +static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_sdvo, base.base); +} + +static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_sdvo, base); +} + +static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) +{ + return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); +} + +static bool +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); +static bool +intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + int type); +static bool +intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector); + +/** + * Writes the SDVOB or SDVOC with the given value, but always writes both + * SDVOB and SDVOC to work around apparent hardware issues (according to + * comments in the BIOS). 
+ */ +static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 bval = val, cval = val; + int i; + + if (intel_sdvo->sdvo_reg == PCH_SDVOB) { + I915_WRITE(intel_sdvo->sdvo_reg, val); + I915_READ(intel_sdvo->sdvo_reg); + return; + } + + if (intel_sdvo->sdvo_reg == GEN3_SDVOB) + cval = I915_READ(GEN3_SDVOC); + else + bval = I915_READ(GEN3_SDVOB); + + /* + * Write the registers twice for luck. Sometimes, + * writing them only once doesn't appear to 'stick'. + * The BIOS does this too. Yay, magic + */ + for (i = 0; i < 2; i++) + { + I915_WRITE(GEN3_SDVOB, bval); + I915_READ(GEN3_SDVOB); + I915_WRITE(GEN3_SDVOC, cval); + I915_READ(GEN3_SDVOC); + } +} + +static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) +{ + struct i2c_msg msgs[] = { + { + .addr = intel_sdvo->slave_addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = intel_sdvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = ch, + } + }; + int ret; + + if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) + return true; + + DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); + return false; +} + +#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} +/** Mapping of command numbers to names, for debug output */ +static const struct _sdvo_cmd_name { + u8 cmd; + const char *name; +} sdvo_cmd_names[] = { + SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), + 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), + + /* Add the op code for SDVO enhancements */ + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), + + /* HDMI op code */ + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), + SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), +}; 
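+
+/*
+ * Command protocol used by the helpers below: each argument byte is
+ * written to its own register, counting down from SDVO_I2C_ARG_0, the
+ * opcode is written to SDVO_I2C_OPCODE, and SDVO_I2C_CMD_STATUS is then
+ * polled until the device stops answering PENDING; any response bytes
+ * are read back from SDVO_I2C_RETURN_0 onwards.  intel_sdvo_write_cmd()
+ * and intel_sdvo_read_response() implement the two halves of this
+ * sequence.
+ */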
+ +#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC") + +static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) +{ + int i; + + DRM_DEBUG_KMS("%s: W: %02X ", + SDVO_NAME(intel_sdvo), cmd); + for (i = 0; i < args_len; i++) + DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); + for (; i < 8; i++) + DRM_LOG_KMS(" "); + for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { + if (cmd == sdvo_cmd_names[i].cmd) { + DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); + break; + } + } + if (i == ARRAY_SIZE(sdvo_cmd_names)) + DRM_LOG_KMS("(%02X)", cmd); + DRM_LOG_KMS("\n"); +} + +static const char *cmd_status_names[] = { + "Power on", + "Success", + "Not supported", + "Invalid arg", + "Pending", + "Target not specified", + "Scaling not supported" +}; + +static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) +{ + u8 *buf, status; + struct i2c_msg *msgs; + int i, ret = true; + + /* Would be simpler to allocate both in one go ? */ + buf = kzalloc(args_len * 2 + 2, GFP_KERNEL); + if (!buf) + return false; + + msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); + if (!msgs) { + kfree(buf); + return false; + } + + intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); + + for (i = 0; i < args_len; i++) { + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2 *i; + buf[2*i + 0] = SDVO_I2C_ARG_0 - i; + buf[2*i + 1] = ((u8*)args)[i]; + } + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2*i; + buf[2*i + 0] = SDVO_I2C_OPCODE; + buf[2*i + 1] = cmd; + + /* the following two are to read the response */ + status = SDVO_I2C_CMD_STATUS; + msgs[i+1].addr = intel_sdvo->slave_addr; + msgs[i+1].flags = 0; + msgs[i+1].len = 1; + msgs[i+1].buf = &status; + + msgs[i+2].addr = intel_sdvo->slave_addr; + msgs[i+2].flags = I2C_M_RD; + msgs[i+2].len = 1; + msgs[i+2].buf = &status; + + ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); + if (ret < 0) { + DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); + ret = false; + goto out; + } + if (ret != i+3) { + /* failure in I2C transfer */ + DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); + ret = false; + } + +out: + kfree(msgs); + kfree(buf); + return ret; +} + +static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, + void *response, int response_len) +{ + u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ + u8 status; + int i; + + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); + + /* + * The documentation states that all commands will be + * processed within 15µs, and that we need only poll + * the status byte a maximum of 3 times in order for the + * command to be complete. + * + * Check 5 times in case the hardware failed to read the docs. + * + * Also beware that the first response by many devices is to + * reply PENDING and stall for time. TVs are notorious for + * requiring longer than specified to complete their replies. + * Originally (in the DDX long ago), the delay was only ever 15ms + * with an additional delay of 30ms applied for TVs added later after + * many experiments. To accommodate both sets of delays, we do a + * sequence of slow checks if the device is falling behind and fails + * to reply within 5*15µs. 
+ */ + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + goto log_fail; + + while (status == SDVO_CMD_STATUS_PENDING && --retry) { + if (retry < 10) + msleep(15); + else + udelay(15); + + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + goto log_fail; + } + + if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) + DRM_LOG_KMS("(%s)", cmd_status_names[status]); + else + DRM_LOG_KMS("(??? %d)", status); + + if (status != SDVO_CMD_STATUS_SUCCESS) + goto log_fail; + + /* Read the command response */ + for (i = 0; i < response_len; i++) { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_RETURN_0 + i, + &((u8 *)response)[i])) + goto log_fail; + DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); + } + DRM_LOG_KMS("\n"); + return true; + +log_fail: + DRM_LOG_KMS("... failed\n"); + return false; +} + +static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) +{ + if (mode->clock >= 100000) + return 1; + else if (mode->clock >= 50000) + return 2; + else + return 4; +} + +static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, + u8 ddc_bus) +{ + /* This must be the immediately preceding write before the i2c xfer */ + return intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_SET_CONTROL_BUS_SWITCH, + &ddc_bus, 1); +} + +static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) +{ + if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) + return false; + + return intel_sdvo_read_response(intel_sdvo, NULL, 0); +} + +static bool +intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len) +{ + if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0)) + return false; + + return intel_sdvo_read_response(intel_sdvo, value, len); +} + +static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo) +{ + struct intel_sdvo_set_target_input_args targets = {0}; + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TARGET_INPUT, + &targets, sizeof(targets)); +} + +/** + * Return whether each input is trained. + * + * This function is making an assumption about the layout of the response, + * which should be checked against the docs. 
+ */ +static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2) +{ + struct intel_sdvo_get_trained_inputs_response response; + + BUILD_BUG_ON(sizeof(response) != 1); + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, + &response, sizeof(response))) + return false; + + *input_1 = response.input0_trained; + *input_2 = response.input1_trained; + return true; +} + +static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, + u16 outputs) +{ + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_ACTIVE_OUTPUTS, + &outputs, sizeof(outputs)); +} + +static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo, + u16 *outputs) +{ + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_ACTIVE_OUTPUTS, + outputs, sizeof(*outputs)); +} + +static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, + int mode) +{ + u8 state = SDVO_ENCODER_STATE_ON; + + switch (mode) { + case DRM_MODE_DPMS_ON: + state = SDVO_ENCODER_STATE_ON; + break; + case DRM_MODE_DPMS_STANDBY: + state = SDVO_ENCODER_STATE_STANDBY; + break; + case DRM_MODE_DPMS_SUSPEND: + state = SDVO_ENCODER_STATE_SUSPEND; + break; + case DRM_MODE_DPMS_OFF: + state = SDVO_ENCODER_STATE_OFF; + break; + } + + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); +} + +static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo, + int *clock_min, + int *clock_max) +{ + struct intel_sdvo_pixel_clock_range clocks; + + BUILD_BUG_ON(sizeof(clocks) != 4); + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, + &clocks, sizeof(clocks))) + return false; + + /* Convert the values from units of 10 kHz to kHz. */ + *clock_min = clocks.min * 10; + *clock_max = clocks.max * 10; + return true; +} + +static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, + u16 outputs) +{ + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TARGET_OUTPUT, + &outputs, sizeof(outputs)); +} + +static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, + struct intel_sdvo_dtd *dtd) +{ + return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && + intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); +} + +static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_dtd *dtd) +{ + return intel_sdvo_set_timing(intel_sdvo, + SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); +} + +static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_dtd *dtd) +{ + return intel_sdvo_set_timing(intel_sdvo, + SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); +} + +static bool +intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, + uint16_t clock, + uint16_t width, + uint16_t height) +{ + struct intel_sdvo_preferred_input_timing_args args; + + memset(&args, 0, sizeof(args)); + args.clock = clock; + args.width = width; + args.height = height; + args.interlace = 0; + + if (intel_sdvo->is_lvds && + (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || + intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) + args.scaled = 1; + + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, + &args, sizeof(args)); +} + +static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_dtd *dtd) +{ + BUILD_BUG_ON(sizeof(dtd->part1) != 8); + BUILD_BUG_ON(sizeof(dtd->part2) != 8); + return 
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, + &dtd->part1, sizeof(dtd->part1)) && + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, + &dtd->part2, sizeof(dtd->part2)); +} + +static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); +} + +static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, + const struct drm_display_mode *mode) +{ + uint16_t width, height; + uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; + uint16_t h_sync_offset, v_sync_offset; + int mode_clock; + + width = mode->hdisplay; + height = mode->vdisplay; + + /* do some mode translations */ + h_blank_len = mode->htotal - mode->hdisplay; + h_sync_len = mode->hsync_end - mode->hsync_start; + + v_blank_len = mode->vtotal - mode->vdisplay; + v_sync_len = mode->vsync_end - mode->vsync_start; + + h_sync_offset = mode->hsync_start - mode->hdisplay; + v_sync_offset = mode->vsync_start - mode->vdisplay; + + mode_clock = mode->clock; + mode_clock /= 10; + dtd->part1.clock = mode_clock; + + dtd->part1.h_active = width & 0xff; + dtd->part1.h_blank = h_blank_len & 0xff; + dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | + ((h_blank_len >> 8) & 0xf); + dtd->part1.v_active = height & 0xff; + dtd->part1.v_blank = v_blank_len & 0xff; + dtd->part1.v_high = (((height >> 8) & 0xf) << 4) | + ((v_blank_len >> 8) & 0xf); + + dtd->part2.h_sync_off = h_sync_offset & 0xff; + dtd->part2.h_sync_width = h_sync_len & 0xff; + dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | + (v_sync_len & 0xf); + dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | + ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | + ((v_sync_len & 0x30) >> 4); + + dtd->part2.dtd_flags = 0x18; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE; + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE; + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; + + dtd->part2.sdvo_flags = 0; + dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; + dtd->part2.reserved = 0; +} + +static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, + const struct intel_sdvo_dtd *dtd) +{ + mode->hdisplay = dtd->part1.h_active; + mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; + mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; + mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; + mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; + mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; + mode->htotal = mode->hdisplay + dtd->part1.h_blank; + mode->htotal += (dtd->part1.h_high & 0xf) << 8; + + mode->vdisplay = dtd->part1.v_active; + mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; + mode->vsync_start = mode->vdisplay; + mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; + mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; + mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; + mode->vsync_end = mode->vsync_start + + (dtd->part2.v_sync_off_width & 0xf); + mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; + mode->vtotal = mode->vdisplay + dtd->part1.v_blank; + mode->vtotal += (dtd->part1.v_high & 0xf) << 8; + + mode->clock = dtd->part1.clock * 10; + + mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + if (dtd->part2.dtd_flags & 
DTD_FLAG_INTERLACE) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) + mode->flags |= DRM_MODE_FLAG_PHSYNC; + if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) + mode->flags |= DRM_MODE_FLAG_PVSYNC; +} + +static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) +{ + struct intel_sdvo_encode encode; + + BUILD_BUG_ON(sizeof(encode) != 2); + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPP_ENCODE, + &encode, sizeof(encode)); +} + +static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, + uint8_t mode) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); +} + +static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, + uint8_t mode) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); +} + +#if 0 +static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) +{ + int i, j; + uint8_t set_buf_index[2]; + uint8_t av_split; + uint8_t buf_size; + uint8_t buf[48]; + uint8_t *pos; + + intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); + + for (i = 0; i <= av_split; i++) { + set_buf_index[0] = i; set_buf_index[1] = 0; + intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2); + intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0); + intel_sdvo_read_response(encoder, &buf_size, 1); + + pos = buf; + for (j = 0; j <= buf_size; j += 8) { + intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA, + NULL, 0); + intel_sdvo_read_response(encoder, pos, 8); + pos += 8; + } + } +} +#endif + +static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, + unsigned if_index, uint8_t tx_rate, + uint8_t *data, unsigned length) +{ + uint8_t set_buf_index[2] = { if_index, 0 }; + uint8_t hbuf_size, tmp[8]; + int i; + + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return false; + + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + &hbuf_size, 1)) + return false; + + /* Buffer size is 0 based, hooray! */ + hbuf_size++; + + DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + if_index, length, hbuf_size); + + for (i = 0; i < hbuf_size; i += 8) { + memset(tmp, 0, 8); + if (i < length) + memcpy(tmp, data + i, min_t(unsigned, 8, length - i)); + + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_DATA, + tmp, 8)) + return false; + } + + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_TXRATE, + &tx_rate, 1); +} + +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, + const struct drm_display_mode *adjusted_mode) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc); + + if (intel_sdvo->rgb_quant_range_selectable) { + if (intel_crtc->config.limited_color_range) + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + else + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + } + + avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); + + intel_dip_infoframe_csum(&avi_if); + + /* sdvo spec says that the ecc is handled by the hw, and it looks like + * we must not send the ecc field, either. 
*/ + memcpy(sdvo_data, &avi_if, 3); + sdvo_data[3] = avi_if.checksum; + memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + + return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, + SDVO_HBUF_TX_VSYNC, + sdvo_data, sizeof(sdvo_data)); +} + +static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) +{ + struct intel_sdvo_tv_format format; + uint32_t format_map; + + format_map = 1 << intel_sdvo->tv_format_index; + memset(&format, 0, sizeof(format)); + memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); + + BUILD_BUG_ON(sizeof(format) != 6); + return intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_TV_FORMAT, + &format, sizeof(format)); +} + +static bool +intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, + const struct drm_display_mode *mode) +{ + struct intel_sdvo_dtd output_dtd; + + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return false; + + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) + return false; + + return true; +} + +/* Asks the sdvo controller for the preferred input mode given the output mode. + * Unfortunately we have to set up the full output mode to do that. */ +static bool +intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_sdvo_dtd input_dtd; + + /* Reset the input timing to the screen. Assume always input 0. */ + if (!intel_sdvo_set_target_input(intel_sdvo)) + return false; + + if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay)) + return false; + + if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, + &input_dtd)) + return false; + + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags; + + return true; +} + +static bool intel_sdvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct drm_display_mode *mode = &pipe_config->requested_mode; + + DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); + pipe_config->pipe_bpp = 8*3; + + if (HAS_PCH_SPLIT(encoder->base.dev)) + pipe_config->has_pch_encoder = true; + + /* We need to construct preferred input timings based on our + * output timings. To do that, we have to set the output + * timings, even though this isn't really the right place in + * the sequence to do it. Oh well. + */ + if (intel_sdvo->is_tv) { + if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) + return false; + + (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, + mode, + adjusted_mode); + } else if (intel_sdvo->is_lvds) { + if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, + intel_sdvo->sdvo_lvds_fixed_mode)) + return false; + + (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, + mode, + adjusted_mode); + } + + /* Make the CRTC code factor in the SDVO pixel multiplier. The + * SDVO device will factor out the multiplier during mode_set. 
+ */ + pipe_config->pixel_multiplier = + intel_sdvo_get_pixel_multiplier(adjusted_mode); + adjusted_mode->clock *= pipe_config->pixel_multiplier; + + if (intel_sdvo->color_range_auto) { + /* See CEA-861-E - 5.1 Default Encoding Parameters */ + /* FIXME: This bit is only valid when using TMDS encoding and 8 + * bit per color mode. */ + if (intel_sdvo->has_hdmi_monitor && + drm_match_cea_mode(adjusted_mode) > 1) + intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235; + else + intel_sdvo->color_range = 0; + } + + if (intel_sdvo->color_range) + pipe_config->limited_color_range = true; + + return true; +} + +static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = intel_encoder->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base); + u32 sdvox; + struct intel_sdvo_in_out_map in_out; + struct intel_sdvo_dtd input_dtd, output_dtd; + int rate; + + if (!mode) + return; + + /* First, set the input mapping for the first input to our controlled + * output. This is only correct if we're a single-input device, in + * which case the first input is the output from the appropriate SDVO + * channel on the motherboard. In a two-input device, the first input + * will be SDVOB and the second SDVOC. + */ + in_out.in0 = intel_sdvo->attached_output; + in_out.in1 = 0; + + intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_IN_OUT_MAP, + &in_out, sizeof(in_out)); + + /* Set the output timings to the screen */ + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return; + + /* lvds has a special fixed output timing. */ + if (intel_sdvo->is_lvds) + intel_sdvo_get_dtd_from_mode(&output_dtd, + intel_sdvo->sdvo_lvds_fixed_mode); + else + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) + DRM_INFO("Setting output timings on %s failed\n", + SDVO_NAME(intel_sdvo)); + + /* Set the input timing to the screen. Assume always input 0. */ + if (!intel_sdvo_set_target_input(intel_sdvo)) + return; + + if (intel_sdvo->has_hdmi_monitor) { + intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_sdvo, + SDVO_COLORIMETRY_RGB256); + intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode); + } else + intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); + + if (intel_sdvo->is_tv && + !intel_sdvo_set_tv_format(intel_sdvo)) + return; + + /* We have tried to get input timing in mode_fixup, and filled into + * adjusted_mode. + */ + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) + input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; + if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) + DRM_INFO("Setting input timings on %s failed\n", + SDVO_NAME(intel_sdvo)); + + switch (intel_crtc->config.pixel_multiplier) { + default: + case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; + case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; + case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; + } + if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) + return; + + /* Set the SDVO control regs. 
*/ + if (INTEL_INFO(dev)->gen >= 4) { + /* The real mode polarity is set by the SDVO commands, using + * struct intel_sdvo_dtd. */ + sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; + if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi) + sdvox |= intel_sdvo->color_range; + if (INTEL_INFO(dev)->gen < 5) + sdvox |= SDVO_BORDER_ENABLE; + } else { + sdvox = I915_READ(intel_sdvo->sdvo_reg); + switch (intel_sdvo->sdvo_reg) { + case GEN3_SDVOB: + sdvox &= SDVOB_PRESERVE_MASK; + break; + case GEN3_SDVOC: + sdvox &= SDVOC_PRESERVE_MASK; + break; + } + sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; + } + + if (INTEL_PCH_TYPE(dev) >= PCH_CPT) + sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); + else + sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); + + if (intel_sdvo->has_hdmi_audio) + sdvox |= SDVO_AUDIO_ENABLE; + + if (INTEL_INFO(dev)->gen >= 4) { + /* done in crtc_mode_set as the dpll_md reg must be written early */ + } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { + /* done in crtc_mode_set as it lives inside the dpll register */ + } else { + sdvox |= (intel_crtc->config.pixel_multiplier - 1) + << SDVO_PORT_MULTIPLY_SHIFT; + } + + if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && + INTEL_INFO(dev)->gen < 5) + sdvox |= SDVO_STALL_SELECT; + intel_sdvo_write_sdvox(intel_sdvo, sdvox); +} + +static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) +{ + struct intel_sdvo_connector *intel_sdvo_connector = + to_intel_sdvo_connector(&connector->base); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); + u16 active_outputs; + + intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); + + if (active_outputs & intel_sdvo_connector->output_flag) + return true; + else + return false; +} + +static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + u16 active_outputs; + u32 tmp; + + tmp = I915_READ(intel_sdvo->sdvo_reg); + intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); + + if (!(tmp & SDVO_ENABLE) && (active_outputs == 0)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_disable_sdvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + u32 temp; + + intel_sdvo_set_active_outputs(intel_sdvo, 0); + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, + DRM_MODE_DPMS_OFF); + + temp = I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) != 0) { + /* HW workaround for IBX, we need to move the port to + * transcoder A before disabling it. */ + if (HAS_PCH_IBX(encoder->base.dev)) { + struct drm_crtc *crtc = encoder->base.crtc; + int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; + + if (temp & SDVO_PIPE_B_SELECT) { + temp &= ~SDVO_PIPE_B_SELECT; + I915_WRITE(intel_sdvo->sdvo_reg, temp); + POSTING_READ(intel_sdvo->sdvo_reg); + + /* Again we need to write this twice. */ + I915_WRITE(intel_sdvo->sdvo_reg, temp); + POSTING_READ(intel_sdvo->sdvo_reg); + + /* Transcoder selection bits only update + * effectively on vblank. 
*/ + if (crtc) + intel_wait_for_vblank(encoder->base.dev, pipe); + else + msleep(50); + } + } + + intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); + } +} + +static void intel_enable_sdvo(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + u32 temp; + bool input1, input2; + int i; + u8 status; + + temp = I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) == 0) { + /* HW workaround for IBX, we need to move the port + * to transcoder A before disabling it, so restore it here. */ + if (HAS_PCH_IBX(dev)) + temp |= SDVO_PIPE_SEL(intel_crtc->pipe); + + intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); + } + for (i = 0; i < 2; i++) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); + /* Warn if the device reported failure to sync. + * A lot of SDVO devices fail to notify of sync, but it's + * a given it the status is a success, we succeeded. + */ + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { + DRM_DEBUG_KMS("First %s output reported failure to " + "sync\n", SDVO_NAME(intel_sdvo)); + } + + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, + DRM_MODE_DPMS_ON); + intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); +} + +static void intel_sdvo_dpms(struct drm_connector *connector, int mode) +{ + struct drm_crtc *crtc; + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + + /* dvo supports only 2 dpms states. */ + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = intel_sdvo->base.base.crtc; + if (!crtc) { + intel_sdvo->base.connectors_active = false; + return; + } + + if (mode != DRM_MODE_DPMS_ON) { + intel_sdvo_set_active_outputs(intel_sdvo, 0); + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, mode); + + intel_sdvo->base.connectors_active = false; + + intel_crtc_update_dpms(crtc); + } else { + intel_sdvo->base.connectors_active = true; + + intel_crtc_update_dpms(crtc); + + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, mode); + intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + } + + intel_modeset_check_state(connector->dev); +} + +static int intel_sdvo_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (intel_sdvo->pixel_clock_min > mode->clock) + return MODE_CLOCK_LOW; + + if (intel_sdvo->pixel_clock_max < mode->clock) + return MODE_CLOCK_HIGH; + + if (intel_sdvo->is_lvds) { + if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) + return MODE_PANEL; + } + + return MODE_OK; +} + +static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) +{ + BUILD_BUG_ON(sizeof(*caps) != 8); + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_DEVICE_CAPS, + caps, sizeof(*caps))) + return false; + + DRM_DEBUG_KMS("SDVO capabilities:\n" + " vendor_id: %d\n" + " device_id: %d\n" + " device_rev_id: %d\n" + " sdvo_version_major: %d\n" + " sdvo_version_minor: %d\n" 
+ " sdvo_inputs_mask: %d\n" + " smooth_scaling: %d\n" + " sharp_scaling: %d\n" + " up_scaling: %d\n" + " down_scaling: %d\n" + " stall_support: %d\n" + " output_flags: %d\n", + caps->vendor_id, + caps->device_id, + caps->device_rev_id, + caps->sdvo_version_major, + caps->sdvo_version_minor, + caps->sdvo_inputs_mask, + caps->smooth_scaling, + caps->sharp_scaling, + caps->up_scaling, + caps->down_scaling, + caps->stall_support, + caps->output_flags); + + return true; +} + +static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + uint16_t hotplug; + + /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise + * on the line. */ + if (IS_I945G(dev) || IS_I945GM(dev)) + return 0; + + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, + &hotplug, sizeof(hotplug))) + return 0; + + return hotplug; +} + +static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) +{ + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, + &intel_sdvo->hotplug_active, 2); +} + +static bool +intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) +{ + /* Is there more than one type of output? */ + return hweight16(intel_sdvo->caps.output_flags) > 1; +} + +static struct edid * +intel_sdvo_get_edid(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + return drm_get_edid(connector, &sdvo->ddc); +} + +/* Mac mini hack -- use the same DDC as the analog connector */ +static struct edid * +intel_sdvo_get_analog_edid(struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + + return drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + dev_priv->crt_ddc_pin)); +} + +static enum drm_connector_status +intel_sdvo_tmds_sink_detect(struct drm_connector *connector) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + enum drm_connector_status status; + struct edid *edid; + + edid = intel_sdvo_get_edid(connector); + + if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { + u8 ddc, saved_ddc = intel_sdvo->ddc_bus; + + /* + * Don't use the 1 as the argument of DDC bus switch to get + * the EDID. It is used for SDVO SPD ROM. + */ + for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { + intel_sdvo->ddc_bus = ddc; + edid = intel_sdvo_get_edid(connector); + if (edid) + break; + } + /* + * If we found the EDID on the other bus, + * assume that is the correct DDC bus. + */ + if (edid == NULL) + intel_sdvo->ddc_bus = saved_ddc; + } + + /* + * When there is no edid and no monitor is connected with VGA + * port, try to use the CRT ddc to read the EDID for DVI-connector. 
+ */ + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + + status = connector_status_unknown; + if (edid != NULL) { + /* DDC bus is shared, match EDID to connector type */ + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; + if (intel_sdvo->is_hdmi) { + intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); + intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); + intel_sdvo->rgb_quant_range_selectable = + drm_rgb_quant_range_selectable(edid); + } + } else + status = connector_status_disconnected; + kfree(edid); + } + + if (status == connector_status_connected) { + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO) + intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON); + } + + return status; +} + +static bool +intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, + struct edid *edid) +{ + bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); + bool connector_is_digital = !!IS_DIGITAL(sdvo); + + DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n", + connector_is_digital, monitor_is_digital); + return connector_is_digital == monitor_is_digital; +} + +static enum drm_connector_status +intel_sdvo_detect(struct drm_connector *connector, bool force) +{ + uint16_t response; + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + enum drm_connector_status ret; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_ATTACHED_DISPLAYS, + &response, 2)) + return connector_status_unknown; + + DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", + response & 0xff, response >> 8, + intel_sdvo_connector->output_flag); + + if (response == 0) + return connector_status_disconnected; + + intel_sdvo->attached_output = response; + + intel_sdvo->has_hdmi_monitor = false; + intel_sdvo->has_hdmi_audio = false; + intel_sdvo->rgb_quant_range_selectable = false; + + if ((intel_sdvo_connector->output_flag & response) == 0) + ret = connector_status_disconnected; + else if (IS_TMDS(intel_sdvo_connector)) + ret = intel_sdvo_tmds_sink_detect(connector); + else { + struct edid *edid; + + /* if we have an edid check it matches the connection */ + edid = intel_sdvo_get_edid(connector); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + if (edid != NULL) { + if (intel_sdvo_connector_matches_edid(intel_sdvo_connector, + edid)) + ret = connector_status_connected; + else + ret = connector_status_disconnected; + + kfree(edid); + } else + ret = connector_status_connected; + } + + /* May update encoder flag for like clock for SDVO TV, etc.*/ + if (ret == connector_status_connected) { + intel_sdvo->is_tv = false; + intel_sdvo->is_lvds = false; + intel_sdvo->base.needs_tv_clock = false; + + if (response & SDVO_TV_MASK) { + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; + } + if (response & SDVO_LVDS_MASK) + intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; + } + + return ret; +} + +static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) +{ + struct edid *edid; + + /* set the bus switch and get the modes */ + edid = intel_sdvo_get_edid(connector); + + /* + * Mac mini hack. On this device, the DVI-I connector shares one DDC + * link between analog and digital outputs. 
So, if the regular SDVO + * DDC fails, check to see if the analog output is disconnected, in + * which case we'll look there for the digital DDC data. + */ + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + + if (edid != NULL) { + if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), + edid)) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + } + + kfree(edid); + } +} + +/* + * Set of SDVO TV modes. + * Note! This is in reply order (see loop in get_tv_modes). + * XXX: all 60Hz refresh? + */ +static const struct drm_display_mode sdvo_tv_modes[] = { + { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, + 416, 0, 200, 201, 232, 233, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, + 416, 0, 240, 241, 272, 273, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, + 496, 0, 300, 301, 332, 333, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, + 736, 0, 350, 351, 382, 383, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, + 736, 0, 400, 401, 432, 433, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, + 736, 0, 480, 481, 512, 513, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, + 800, 0, 480, 481, 512, 513, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, + 800, 0, 576, 577, 608, 609, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, + 816, 0, 350, 351, 382, 383, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, + 816, 0, 400, 401, 432, 433, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, + 816, 0, 480, 481, 512, 513, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, + 816, 0, 540, 541, 572, 573, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, + 816, 0, 576, 577, 608, 609, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832, + 864, 0, 576, 577, 608, 609, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, + 896, 0, 600, 601, 632, 633, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, + 928, 0, 624, 625, 656, 657, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, + 1016, 0, 766, 767, 798, 799, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, + 1120, 0, 768, 769, 800, 801, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, + 1376, 0, 1024, 1025, 1056, 1057, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, +}; + +static void 
intel_sdvo_get_tv_modes(struct drm_connector *connector) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo_sdtv_resolution_request tv_res; + uint32_t reply = 0, format_map = 0; + int i; + + /* Read the list of supported input resolutions for the selected TV + * format. + */ + format_map = 1 << intel_sdvo->tv_format_index; + memcpy(&tv_res, &format_map, + min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); + + if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) + return; + + BUILD_BUG_ON(sizeof(tv_res) != 3); + if (!intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + &tv_res, sizeof(tv_res))) + return; + if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) + return; + + for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) + if (reply & (1 << i)) { + struct drm_display_mode *nmode; + nmode = drm_mode_duplicate(connector->dev, + &sdvo_tv_modes[i]); + if (nmode) + drm_mode_probed_add(connector, nmode); + } +} + +static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_display_mode *newmode; + + /* + * Attempt to get the mode list from DDC. + * Assume that the preferred modes are + * arranged in priority order. + */ + intel_ddc_get_modes(connector, &intel_sdvo->ddc); + + /* + * Fetch modes from VBT. For SDVO prefer the VBT mode since some + * SDVO->LVDS transcoders can't cope with the EDID mode. Since + * drm_mode_probed_add adds the mode at the head of the list we add it + * last. + */ + if (dev_priv->sdvo_lvds_vbt_mode != NULL) { + newmode = drm_mode_duplicate(connector->dev, + dev_priv->sdvo_lvds_vbt_mode); + if (newmode != NULL) { + /* Guarantee the mode is preferred */ + newmode->type = (DRM_MODE_TYPE_PREFERRED | + DRM_MODE_TYPE_DRIVER); + drm_mode_probed_add(connector, newmode); + } + } + + list_for_each_entry(newmode, &connector->probed_modes, head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + intel_sdvo->sdvo_lvds_fixed_mode = + drm_mode_duplicate(connector->dev, newmode); + + intel_sdvo->is_lvds = true; + break; + } + } + +} + +static int intel_sdvo_get_modes(struct drm_connector *connector) +{ + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + + if (IS_TV(intel_sdvo_connector)) + intel_sdvo_get_tv_modes(connector); + else if (IS_LVDS(intel_sdvo_connector)) + intel_sdvo_get_lvds_modes(connector); + else + intel_sdvo_get_ddc_modes(connector); + + return !list_empty(&connector->probed_modes); +} + +static void +intel_sdvo_destroy_enhance_property(struct drm_connector *connector) +{ + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + struct drm_device *dev = connector->dev; + + if (intel_sdvo_connector->left) + drm_property_destroy(dev, intel_sdvo_connector->left); + if (intel_sdvo_connector->right) + drm_property_destroy(dev, intel_sdvo_connector->right); + if (intel_sdvo_connector->top) + drm_property_destroy(dev, intel_sdvo_connector->top); + if (intel_sdvo_connector->bottom) + drm_property_destroy(dev, intel_sdvo_connector->bottom); + if (intel_sdvo_connector->hpos) + drm_property_destroy(dev, intel_sdvo_connector->hpos); + if (intel_sdvo_connector->vpos) + drm_property_destroy(dev, intel_sdvo_connector->vpos); + if (intel_sdvo_connector->saturation) + drm_property_destroy(dev, intel_sdvo_connector->saturation); + if 
(intel_sdvo_connector->contrast) + drm_property_destroy(dev, intel_sdvo_connector->contrast); + if (intel_sdvo_connector->hue) + drm_property_destroy(dev, intel_sdvo_connector->hue); + if (intel_sdvo_connector->sharpness) + drm_property_destroy(dev, intel_sdvo_connector->sharpness); + if (intel_sdvo_connector->flicker_filter) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); + if (intel_sdvo_connector->flicker_filter_2d) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); + if (intel_sdvo_connector->flicker_filter_adaptive) + drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); + if (intel_sdvo_connector->tv_luma_filter) + drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); + if (intel_sdvo_connector->tv_chroma_filter) + drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); + if (intel_sdvo_connector->dot_crawl) + drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); + if (intel_sdvo_connector->brightness) + drm_property_destroy(dev, intel_sdvo_connector->brightness); +} + +static void intel_sdvo_destroy(struct drm_connector *connector) +{ + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + + if (intel_sdvo_connector->tv_format) + drm_property_destroy(connector->dev, + intel_sdvo_connector->tv_format); + + intel_sdvo_destroy_enhance_property(connector); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(intel_sdvo_connector); +} + +static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct edid *edid; + bool has_audio = false; + + if (!intel_sdvo->is_hdmi) + return false; + + edid = intel_sdvo_get_edid(connector); + if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) + has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + + return has_audio; +} + +static int +intel_sdvo_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint16_t temp_value; + uint8_t cmd; + int ret; + + ret = drm_object_property_set_value(&connector->base, property, val); + if (ret) + return ret; + + if (property == dev_priv->force_audio_property) { + int i = val; + bool has_audio; + + if (i == intel_sdvo_connector->force_audio) + return 0; + + intel_sdvo_connector->force_audio = i; + + if (i == HDMI_AUDIO_AUTO) + has_audio = intel_sdvo_detect_hdmi_audio(connector); + else + has_audio = (i == HDMI_AUDIO_ON); + + if (has_audio == intel_sdvo->has_hdmi_audio) + return 0; + + intel_sdvo->has_hdmi_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + bool old_auto = intel_sdvo->color_range_auto; + uint32_t old_range = intel_sdvo->color_range; + + switch (val) { + case INTEL_BROADCAST_RGB_AUTO: + intel_sdvo->color_range_auto = true; + break; + case INTEL_BROADCAST_RGB_FULL: + intel_sdvo->color_range_auto = false; + intel_sdvo->color_range = 0; + break; + case INTEL_BROADCAST_RGB_LIMITED: + intel_sdvo->color_range_auto = false; + /* FIXME: this bit is only valid when using TMDS + * encoding and 8 bit per color mode. 
*/ + intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235; + break; + default: + return -EINVAL; + } + + if (old_auto == intel_sdvo->color_range_auto && + old_range == intel_sdvo->color_range) + return 0; + + goto done; + } + +#define CHECK_PROPERTY(name, NAME) \ + if (intel_sdvo_connector->name == property) { \ + if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ + if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ + cmd = SDVO_CMD_SET_##NAME; \ + intel_sdvo_connector->cur_##name = temp_value; \ + goto set_value; \ + } + + if (property == intel_sdvo_connector->tv_format) { + if (val >= TV_FORMAT_NUM) + return -EINVAL; + + if (intel_sdvo->tv_format_index == + intel_sdvo_connector->tv_format_supported[val]) + return 0; + + intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; + goto done; + } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { + temp_value = val; + if (intel_sdvo_connector->left == property) { + drm_object_property_set_value(&connector->base, + intel_sdvo_connector->right, val); + if (intel_sdvo_connector->left_margin == temp_value) + return 0; + + intel_sdvo_connector->left_margin = temp_value; + intel_sdvo_connector->right_margin = temp_value; + temp_value = intel_sdvo_connector->max_hscan - + intel_sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; + goto set_value; + } else if (intel_sdvo_connector->right == property) { + drm_object_property_set_value(&connector->base, + intel_sdvo_connector->left, val); + if (intel_sdvo_connector->right_margin == temp_value) + return 0; + + intel_sdvo_connector->left_margin = temp_value; + intel_sdvo_connector->right_margin = temp_value; + temp_value = intel_sdvo_connector->max_hscan - + intel_sdvo_connector->left_margin; + cmd = SDVO_CMD_SET_OVERSCAN_H; + goto set_value; + } else if (intel_sdvo_connector->top == property) { + drm_object_property_set_value(&connector->base, + intel_sdvo_connector->bottom, val); + if (intel_sdvo_connector->top_margin == temp_value) + return 0; + + intel_sdvo_connector->top_margin = temp_value; + intel_sdvo_connector->bottom_margin = temp_value; + temp_value = intel_sdvo_connector->max_vscan - + intel_sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; + goto set_value; + } else if (intel_sdvo_connector->bottom == property) { + drm_object_property_set_value(&connector->base, + intel_sdvo_connector->top, val); + if (intel_sdvo_connector->bottom_margin == temp_value) + return 0; + + intel_sdvo_connector->top_margin = temp_value; + intel_sdvo_connector->bottom_margin = temp_value; + temp_value = intel_sdvo_connector->max_vscan - + intel_sdvo_connector->top_margin; + cmd = SDVO_CMD_SET_OVERSCAN_V; + goto set_value; + } + CHECK_PROPERTY(hpos, HPOS) + CHECK_PROPERTY(vpos, VPOS) + CHECK_PROPERTY(saturation, SATURATION) + CHECK_PROPERTY(contrast, CONTRAST) + CHECK_PROPERTY(hue, HUE) + CHECK_PROPERTY(brightness, BRIGHTNESS) + CHECK_PROPERTY(sharpness, SHARPNESS) + CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) + CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) + CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) + CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) + CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) + CHECK_PROPERTY(dot_crawl, DOT_CRAWL) + } + + return -EINVAL; /* unknown property */ + +set_value: + if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) + return -EIO; + + +done: + if (intel_sdvo->base.base.crtc) + intel_crtc_restore_mode(intel_sdvo->base.base.crtc); + + return 0; +#undef CHECK_PROPERTY +} + 
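+
+/*
+ * Illustrative note on the margin handling in intel_sdvo_set_property()
+ * above (the numeric values here are hypothetical, chosen only for the
+ * example): the four margin properties are kept pairwise symmetric and
+ * are folded into a single overscan value before being sent to the
+ * encoder.  For instance, with max_hscan == 100, setting left_margin to
+ * 10 also forces right_margin to 10 and programs
+ * SDVO_CMD_SET_OVERSCAN_H with 100 - 10 == 90.  The top/bottom margins
+ * follow the same pattern via SDVO_CMD_SET_OVERSCAN_V.
+ */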
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = { + .dpms = intel_sdvo_dpms, + .detect = intel_sdvo_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_sdvo_set_property, + .destroy = intel_sdvo_destroy, +}; + +static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { + .get_modes = intel_sdvo_get_modes, + .mode_valid = intel_sdvo_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) +{ + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + + if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) + drm_mode_destroy(encoder->dev, + intel_sdvo->sdvo_lvds_fixed_mode); + + i2c_del_adapter(&intel_sdvo->ddc); + intel_encoder_destroy(encoder); +} + +static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { + .destroy = intel_sdvo_enc_destroy, +}; + +static void +intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) +{ + uint16_t mask = 0; + unsigned int num_bits; + + /* Make a mask of outputs less than or equal to our own priority in the + * list. + */ + switch (sdvo->controlled_output) { + case SDVO_OUTPUT_LVDS1: + mask |= SDVO_OUTPUT_LVDS1; + case SDVO_OUTPUT_LVDS0: + mask |= SDVO_OUTPUT_LVDS0; + case SDVO_OUTPUT_TMDS1: + mask |= SDVO_OUTPUT_TMDS1; + case SDVO_OUTPUT_TMDS0: + mask |= SDVO_OUTPUT_TMDS0; + case SDVO_OUTPUT_RGB1: + mask |= SDVO_OUTPUT_RGB1; + case SDVO_OUTPUT_RGB0: + mask |= SDVO_OUTPUT_RGB0; + break; + } + + /* Count bits to find what number we are in the priority list. */ + mask &= sdvo->caps.output_flags; + num_bits = hweight16(mask); + /* If more than 3 outputs, default to DDC bus 3 for now. */ + if (num_bits > 3) + num_bits = 3; + + /* Corresponds to SDVO_CONTROL_BUS_DDCx */ + sdvo->ddc_bus = 1 << num_bits; +} + +/** + * Choose the appropriate DDC bus for control bus switch command for this + * SDVO output based on the controlled output. + * + * DDC bus number assignment is in a priority order of RGB outputs, then TMDS + * outputs, then LVDS outputs. + */ +static void +intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, + struct intel_sdvo *sdvo, u32 reg) +{ + struct sdvo_device_mapping *mapping; + + if (sdvo->is_sdvob) + mapping = &(dev_priv->sdvo_mappings[0]); + else + mapping = &(dev_priv->sdvo_mappings[1]); + + if (mapping->initialized) + sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); + else + intel_sdvo_guess_ddc_bus(sdvo); +} + +static void +intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, + struct intel_sdvo *sdvo, u32 reg) +{ + struct sdvo_device_mapping *mapping; + u8 pin; + + if (sdvo->is_sdvob) + mapping = &dev_priv->sdvo_mappings[0]; + else + mapping = &dev_priv->sdvo_mappings[1]; + + if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) + pin = mapping->i2c_pin; + else + pin = GMBUS_PORT_DPB; + + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); + + /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow + * our code totally fails once we start using gmbus. Hence fall back to + * bit banging for now. 
*/ + intel_gmbus_force_bit(sdvo->i2c, true); +} + +/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ +static void +intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) +{ + intel_gmbus_force_bit(sdvo->i2c, false); +} + +static bool +intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) +{ + return intel_sdvo_check_supp_encode(intel_sdvo); +} + +static u8 +intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct sdvo_device_mapping *my_mapping, *other_mapping; + + if (sdvo->is_sdvob) { + my_mapping = &dev_priv->sdvo_mappings[0]; + other_mapping = &dev_priv->sdvo_mappings[1]; + } else { + my_mapping = &dev_priv->sdvo_mappings[1]; + other_mapping = &dev_priv->sdvo_mappings[0]; + } + + /* If the BIOS described our SDVO device, take advantage of it. */ + if (my_mapping->slave_addr) + return my_mapping->slave_addr; + + /* If the BIOS only described a different SDVO device, use the + * address that it isn't using. + */ + if (other_mapping->slave_addr) { + if (other_mapping->slave_addr == 0x70) + return 0x72; + else + return 0x70; + } + + /* No SDVO device info is found for another DVO port, + * so use mapping assumption we had before BIOS parsing. + */ + if (sdvo->is_sdvob) + return 0x70; + else + return 0x72; +} + +static void +intel_sdvo_connector_init(struct intel_sdvo_connector *connector, + struct intel_sdvo *encoder) +{ + drm_connector_init(encoder->base.base.dev, + &connector->base.base, + &intel_sdvo_connector_funcs, + connector->base.base.connector_type); + + drm_connector_helper_add(&connector->base.base, + &intel_sdvo_connector_helper_funcs); + + connector->base.base.interlace_allowed = 1; + connector->base.base.doublescan_allowed = 0; + connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; + connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; + + intel_connector_attach_encoder(&connector->base, &encoder->base); + drm_sysfs_connector_add(&connector->base.base); +} + +static void +intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *connector) +{ + struct drm_device *dev = connector->base.base.dev; + + intel_attach_force_audio_property(&connector->base.base); + if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) { + intel_attach_broadcast_rgb_property(&connector->base.base); + intel_sdvo->color_range_auto = true; + } +} + +static bool +intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; + } else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; + } + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + if (intel_sdvo_get_hotplug_support(intel_sdvo) & + intel_sdvo_connector->output_flag) { + intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; + /* Some SDVO devices have one-shot hotplug interrupts. 
+ * Ensure that they get re-enabled when an interrupt happens. + */ + intel_encoder->hot_plug = intel_sdvo_enable_hotplug; + intel_sdvo_enable_hotplug(intel_encoder); + } else { + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + } + encoder->encoder_type = DRM_MODE_ENCODER_TMDS; + connector->connector_type = DRM_MODE_CONNECTOR_DVID; + + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + intel_sdvo->is_hdmi = true; + } + + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (intel_sdvo->is_hdmi) + intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); + + return true; +} + +static bool +intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + + intel_sdvo->controlled_output |= type; + intel_sdvo_connector->output_flag = type; + + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; + + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + + if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) + goto err; + + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + goto err; + + return true; + +err: + intel_sdvo_destroy(connector); + return false; +} + +static bool +intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; + } else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; + } + + intel_sdvo_connector_init(intel_sdvo_connector, + intel_sdvo); + return true; +} + +static bool +intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; + } else if (device == 1) { + 
intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; + } + + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + goto err; + + return true; + +err: + intel_sdvo_destroy(connector); + return false; +} + +static bool +intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) +{ + intel_sdvo->is_tv = false; + intel_sdvo->base.needs_tv_clock = false; + intel_sdvo->is_lvds = false; + + /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + + if (flags & SDVO_OUTPUT_TMDS0) + if (!intel_sdvo_dvi_init(intel_sdvo, 0)) + return false; + + if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (!intel_sdvo_dvi_init(intel_sdvo, 1)) + return false; + + /* TV has no XXX1 function block */ + if (flags & SDVO_OUTPUT_SVID0) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0)) + return false; + + if (flags & SDVO_OUTPUT_CVBS0) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) + return false; + + if (flags & SDVO_OUTPUT_YPRPB0) + if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0)) + return false; + + if (flags & SDVO_OUTPUT_RGB0) + if (!intel_sdvo_analog_init(intel_sdvo, 0)) + return false; + + if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (!intel_sdvo_analog_init(intel_sdvo, 1)) + return false; + + if (flags & SDVO_OUTPUT_LVDS0) + if (!intel_sdvo_lvds_init(intel_sdvo, 0)) + return false; + + if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (!intel_sdvo_lvds_init(intel_sdvo, 1)) + return false; + + if ((flags & SDVO_OUTPUT_MASK) == 0) { + unsigned char bytes[2]; + + intel_sdvo->controlled_output = 0; + memcpy(bytes, &intel_sdvo->caps.output_flags, 2); + DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(intel_sdvo), + bytes[0], bytes[1]); + return false; + } + intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + return true; +} + +static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_connector *connector, *tmp; + + list_for_each_entry_safe(connector, tmp, + &dev->mode_config.connector_list, head) { + if (intel_attached_encoder(connector) == &intel_sdvo->base) + intel_sdvo_destroy(connector); + } +} + +static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + int type) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct intel_sdvo_tv_format format; + uint32_t format_map, i; + + if (!intel_sdvo_set_target_output(intel_sdvo, type)) + return false; + + BUILD_BUG_ON(sizeof(format) != 6); + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPPORTED_TV_FORMATS, + &format, sizeof(format))) + return false; + + memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); + + if (format_map == 0) + return false; + + intel_sdvo_connector->format_supported_num = 0; + for (i = 0 ; i < TV_FORMAT_NUM; i++) + if (format_map & (1 << i)) + intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; + + + intel_sdvo_connector->tv_format = + drm_property_create(dev, DRM_MODE_PROP_ENUM, + "mode", intel_sdvo_connector->format_supported_num); + if (!intel_sdvo_connector->tv_format) + return false; + + for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) + drm_property_add_enum( + intel_sdvo_connector->tv_format, i, + i, 
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); + + intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; + drm_object_attach_property(&intel_sdvo_connector->base.base.base, + intel_sdvo_connector->tv_format, 0); + return true; + +} + +#define ENHANCEMENT(name, NAME) do { \ + if (enhancements.name) { \ + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ + !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ + return false; \ + intel_sdvo_connector->max_##name = data_value[0]; \ + intel_sdvo_connector->cur_##name = response; \ + intel_sdvo_connector->name = \ + drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ + if (!intel_sdvo_connector->name) return false; \ + drm_object_attach_property(&connector->base, \ + intel_sdvo_connector->name, \ + intel_sdvo_connector->cur_##name); \ + DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ + data_value[0], data_value[1], response); \ + } \ +} while (0) + +static bool +intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + struct intel_sdvo_enhancements_reply enhancements) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_connector *connector = &intel_sdvo_connector->base.base; + uint16_t response, data_value[2]; + + /* when horizontal overscan is supported, Add the left/right property */ + if (enhancements.overscan_h) { + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_MAX_OVERSCAN_H, + &data_value, 4)) + return false; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_OVERSCAN_H, + &response, 2)) + return false; + + intel_sdvo_connector->max_hscan = data_value[0]; + intel_sdvo_connector->left_margin = data_value[0] - response; + intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; + intel_sdvo_connector->left = + drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]); + if (!intel_sdvo_connector->left) + return false; + + drm_object_attach_property(&connector->base, + intel_sdvo_connector->left, + intel_sdvo_connector->left_margin); + + intel_sdvo_connector->right = + drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]); + if (!intel_sdvo_connector->right) + return false; + + drm_object_attach_property(&connector->base, + intel_sdvo_connector->right, + intel_sdvo_connector->right_margin); + DRM_DEBUG_KMS("h_overscan: max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + + if (enhancements.overscan_v) { + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_MAX_OVERSCAN_V, + &data_value, 4)) + return false; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_OVERSCAN_V, + &response, 2)) + return false; + + intel_sdvo_connector->max_vscan = data_value[0]; + intel_sdvo_connector->top_margin = data_value[0] - response; + intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; + intel_sdvo_connector->top = + drm_property_create_range(dev, 0, + "top_margin", 0, data_value[0]); + if (!intel_sdvo_connector->top) + return false; + + drm_object_attach_property(&connector->base, + intel_sdvo_connector->top, + intel_sdvo_connector->top_margin); + + intel_sdvo_connector->bottom = + drm_property_create_range(dev, 0, + "bottom_margin", 0, data_value[0]); + if (!intel_sdvo_connector->bottom) + return false; + + drm_object_attach_property(&connector->base, + intel_sdvo_connector->bottom, + intel_sdvo_connector->bottom_margin); + DRM_DEBUG_KMS("v_overscan: 
max %d, " + "default %d, current %d\n", + data_value[0], data_value[1], response); + } + + ENHANCEMENT(hpos, HPOS); + ENHANCEMENT(vpos, VPOS); + ENHANCEMENT(saturation, SATURATION); + ENHANCEMENT(contrast, CONTRAST); + ENHANCEMENT(hue, HUE); + ENHANCEMENT(sharpness, SHARPNESS); + ENHANCEMENT(brightness, BRIGHTNESS); + ENHANCEMENT(flicker_filter, FLICKER_FILTER); + ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); + ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); + ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); + ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); + + if (enhancements.dot_crawl) { + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) + return false; + + intel_sdvo_connector->max_dot_crawl = 1; + intel_sdvo_connector->cur_dot_crawl = response & 0x1; + intel_sdvo_connector->dot_crawl = + drm_property_create_range(dev, 0, "dot_crawl", 0, 1); + if (!intel_sdvo_connector->dot_crawl) + return false; + + drm_object_attach_property(&connector->base, + intel_sdvo_connector->dot_crawl, + intel_sdvo_connector->cur_dot_crawl); + DRM_DEBUG_KMS("dot crawl: current %d\n", response); + } + + return true; +} + +static bool +intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + struct intel_sdvo_enhancements_reply enhancements) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_connector *connector = &intel_sdvo_connector->base.base; + uint16_t response, data_value[2]; + + ENHANCEMENT(brightness, BRIGHTNESS); + + return true; +} +#undef ENHANCEMENT + +static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector) +{ + union { + struct intel_sdvo_enhancements_reply reply; + uint16_t response; + } enhancements; + + BUILD_BUG_ON(sizeof(enhancements) != 2); + + enhancements.response = 0; + intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + &enhancements, sizeof(enhancements)); + if (enhancements.response == 0) { + DRM_DEBUG_KMS("No enhancement is supported\n"); + return true; + } + + if (IS_TV(intel_sdvo_connector)) + return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); + else if (IS_LVDS(intel_sdvo_connector)) + return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); + else + return true; +} + +static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + + if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) + return -EIO; + + return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); +} + +static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + return sdvo->i2c->algo->functionality(sdvo->i2c); +} + +static const struct i2c_algorithm intel_sdvo_ddc_proxy = { + .master_xfer = intel_sdvo_ddc_proxy_xfer, + .functionality = intel_sdvo_ddc_proxy_func +}; + +static bool +intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, + struct drm_device *dev) +{ + sdvo->ddc.owner = THIS_MODULE; + sdvo->ddc.class = I2C_CLASS_DDC; + snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); + sdvo->ddc.dev.parent = &dev->pdev->dev; + sdvo->ddc.algo_data = sdvo; + sdvo->ddc.algo = &intel_sdvo_ddc_proxy; + + return i2c_add_adapter(&sdvo->ddc) == 0; +} + +bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) +{ + struct 
drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + struct intel_sdvo *intel_sdvo; + u32 hotplug_mask; + int i; + intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); + if (!intel_sdvo) + return false; + + intel_sdvo->sdvo_reg = sdvo_reg; + intel_sdvo->is_sdvob = is_sdvob; + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; + intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) + goto err_i2c_bus; + + /* encoder type will be decided later */ + intel_encoder = &intel_sdvo->base; + intel_encoder->type = INTEL_OUTPUT_SDVO; + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); + + /* Read the regs to test if we can talk to the device */ + for (i = 0; i < 0x40; i++) { + u8 byte; + + if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { + DRM_DEBUG_KMS("No SDVO device found on %s\n", + SDVO_NAME(intel_sdvo)); + goto err; + } + } + + hotplug_mask = 0; + if (IS_G4X(dev)) { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; + } else if (IS_GEN4(dev)) { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965; + } else { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; + } + + intel_encoder->compute_config = intel_sdvo_compute_config; + intel_encoder->disable = intel_disable_sdvo; + intel_encoder->mode_set = intel_sdvo_mode_set; + intel_encoder->enable = intel_enable_sdvo; + intel_encoder->get_hw_state = intel_sdvo_get_hw_state; + + /* In default case sdvo lvds is false */ + if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) + goto err; + + if (intel_sdvo_output_setup(intel_sdvo, + intel_sdvo->caps.output_flags) != true) { + DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", + SDVO_NAME(intel_sdvo)); + /* Output_setup can leave behind connectors! */ + goto err_output; + } + + /* Only enable the hotplug irq if we need it, to work around noisy + * hotplug lines. + */ + if (intel_sdvo->hotplug_active) { + intel_encoder->hpd_pin = + intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C; + } + + /* + * Cloning SDVO with anything is often impossible, since the SDVO + * encoder can request a special input timing mode. And even if that's + * not the case we have evidence that cloning a plain unscaled mode with + * VGA doesn't really work. Furthermore the cloning flags are way too + * simplistic anyway to express such constraints, so just give up on + * cloning for SDVO encoders. + */ + intel_sdvo->base.cloneable = false; + + intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); + + /* Set the input timing to the screen. Assume always input 0. */ + if (!intel_sdvo_set_target_input(intel_sdvo)) + goto err_output; + + if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, + &intel_sdvo->pixel_clock_min, + &intel_sdvo->pixel_clock_max)) + goto err_output; + + DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " + "clock range %dMHz - %dMHz, " + "input 1: %c, input 2: %c, " + "output 1: %c, output 2: %c\n", + SDVO_NAME(intel_sdvo), + intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, + intel_sdvo->caps.device_rev_id, + intel_sdvo->pixel_clock_min / 1000, + intel_sdvo->pixel_clock_max / 1000, + (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', + (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 
'Y' : 'N', + /* check currently supported outputs */ + intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', + intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); + return true; + +err_output: + intel_sdvo_output_cleanup(intel_sdvo); + +err: + drm_encoder_cleanup(&intel_encoder->base); + i2c_del_adapter(&intel_sdvo->ddc); +err_i2c_bus: + intel_sdvo_unselect_i2c_bus(intel_sdvo); + kfree(intel_sdvo); + + return false; +} diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h new file mode 100644 index 0000000..770bdd6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -0,0 +1,730 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + */ + +/** + * @file SDVO command definitions and structures. + */ + +#define SDVO_OUTPUT_FIRST (0) +#define SDVO_OUTPUT_TMDS0 (1 << 0) +#define SDVO_OUTPUT_RGB0 (1 << 1) +#define SDVO_OUTPUT_CVBS0 (1 << 2) +#define SDVO_OUTPUT_SVID0 (1 << 3) +#define SDVO_OUTPUT_YPRPB0 (1 << 4) +#define SDVO_OUTPUT_SCART0 (1 << 5) +#define SDVO_OUTPUT_LVDS0 (1 << 6) +#define SDVO_OUTPUT_TMDS1 (1 << 8) +#define SDVO_OUTPUT_RGB1 (1 << 9) +#define SDVO_OUTPUT_CVBS1 (1 << 10) +#define SDVO_OUTPUT_SVID1 (1 << 11) +#define SDVO_OUTPUT_YPRPB1 (1 << 12) +#define SDVO_OUTPUT_SCART1 (1 << 13) +#define SDVO_OUTPUT_LVDS1 (1 << 14) +#define SDVO_OUTPUT_LAST (14) + +struct intel_sdvo_caps { + u8 vendor_id; + u8 device_id; + u8 device_rev_id; + u8 sdvo_version_major; + u8 sdvo_version_minor; + unsigned int sdvo_inputs_mask:2; + unsigned int smooth_scaling:1; + unsigned int sharp_scaling:1; + unsigned int up_scaling:1; + unsigned int down_scaling:1; + unsigned int stall_support:1; + unsigned int pad:1; + u16 output_flags; +} __attribute__((packed)); + +/* Note: SDVO detailed timing flags match EDID misc flags. 
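The output_flags word in intel_sdvo_caps above is the same bitmask that intel_sdvo_output_setup() walks earlier in intel_sdvo.c: the low byte carries the XXX0 outputs, the high byte the XXX1 outputs, and an XXX1 output is only honoured when the matching XXX0 bit is also set. A minimal, self-contained sketch of that decoding; the short macro names are local stand-ins for the SDVO_OUTPUT_* defines above, and the helper itself is hypothetical, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define TMDS0 (1u << 0)
#define RGB0  (1u << 1)
#define LVDS0 (1u << 6)
#define TMDS1 (1u << 8)
#define RGB1  (1u << 9)
#define LVDS1 (1u << 14)

/* Report which connector-init paths a caps.output_flags word would trigger. */
static void report_outputs(uint16_t flags)
{
    if (flags & TMDS0)
        puts("DVI/HDMI on device 0");
    if ((flags & (TMDS0 | TMDS1)) == (TMDS0 | TMDS1))
        puts("DVI/HDMI on device 1");   /* only when TMDS0 is also present */
    if (flags & RGB0)
        puts("VGA on device 0");
    if ((flags & (RGB0 | RGB1)) == (RGB0 | RGB1))
        puts("VGA on device 1");
    if (flags & LVDS0)
        puts("LVDS on device 0");
    if ((flags & (LVDS0 | LVDS1)) == (LVDS0 | LVDS1))
        puts("LVDS on device 1");
}

int main(void)
{
    report_outputs(TMDS0 | RGB0);       /* a typical DVI + VGA encoder */
    return 0;
}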
*/ +#define DTD_FLAG_HSYNC_POSITIVE (1 << 1) +#define DTD_FLAG_VSYNC_POSITIVE (1 << 2) +#define DTD_FLAG_INTERLACE (1 << 7) + +/** This matches the EDID DTD structure, more or less */ +struct intel_sdvo_dtd { + struct { + u16 clock; /**< pixel clock, in 10kHz units */ + u8 h_active; /**< lower 8 bits (pixels) */ + u8 h_blank; /**< lower 8 bits (pixels) */ + u8 h_high; /**< upper 4 bits each h_active, h_blank */ + u8 v_active; /**< lower 8 bits (lines) */ + u8 v_blank; /**< lower 8 bits (lines) */ + u8 v_high; /**< upper 4 bits each v_active, v_blank */ + } part1; + + struct { + u8 h_sync_off; /**< lower 8 bits, from hblank start */ + u8 h_sync_width; /**< lower 8 bits (pixels) */ + /** lower 4 bits each vsync offset, vsync width */ + u8 v_sync_off_width; + /** + * 2 high bits of hsync offset, 2 high bits of hsync width, + * bits 4-5 of vsync offset, and 2 high bits of vsync width. + */ + u8 sync_off_width_high; + u8 dtd_flags; + u8 sdvo_flags; + /** bits 6-7 of vsync offset at bits 6-7 */ + u8 v_sync_off_high; + u8 reserved; + } part2; +} __attribute__((packed)); + +struct intel_sdvo_pixel_clock_range { + u16 min; /**< pixel clock, in 10kHz units */ + u16 max; /**< pixel clock, in 10kHz units */ +} __attribute__((packed)); + +struct intel_sdvo_preferred_input_timing_args { + u16 clock; + u16 width; + u16 height; + u8 interlace:1; + u8 scaled:1; + u8 pad:6; +} __attribute__((packed)); + +/* I2C registers for SDVO */ +#define SDVO_I2C_ARG_0 0x07 +#define SDVO_I2C_ARG_1 0x06 +#define SDVO_I2C_ARG_2 0x05 +#define SDVO_I2C_ARG_3 0x04 +#define SDVO_I2C_ARG_4 0x03 +#define SDVO_I2C_ARG_5 0x02 +#define SDVO_I2C_ARG_6 0x01 +#define SDVO_I2C_ARG_7 0x00 +#define SDVO_I2C_OPCODE 0x08 +#define SDVO_I2C_CMD_STATUS 0x09 +#define SDVO_I2C_RETURN_0 0x0a +#define SDVO_I2C_RETURN_1 0x0b +#define SDVO_I2C_RETURN_2 0x0c +#define SDVO_I2C_RETURN_3 0x0d +#define SDVO_I2C_RETURN_4 0x0e +#define SDVO_I2C_RETURN_5 0x0f +#define SDVO_I2C_RETURN_6 0x10 +#define SDVO_I2C_RETURN_7 0x11 +#define SDVO_I2C_VENDOR_BEGIN 0x20 + +/* Status results */ +#define SDVO_CMD_STATUS_POWER_ON 0x0 +#define SDVO_CMD_STATUS_SUCCESS 0x1 +#define SDVO_CMD_STATUS_NOTSUPP 0x2 +#define SDVO_CMD_STATUS_INVALID_ARG 0x3 +#define SDVO_CMD_STATUS_PENDING 0x4 +#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 +#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 + +/* SDVO commands, argument/result registers */ + +#define SDVO_CMD_RESET 0x01 + +/** Returns a struct intel_sdvo_caps */ +#define SDVO_CMD_GET_DEVICE_CAPS 0x02 + +#define SDVO_CMD_GET_FIRMWARE_REV 0x86 +# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 +# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 +# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 + +/** + * Reports which inputs are trained (managed to sync). + * + * Devices must have trained within 2 vsyncs of a mode change. + */ +#define SDVO_CMD_GET_TRAINED_INPUTS 0x03 +struct intel_sdvo_get_trained_inputs_response { + unsigned int input0_trained:1; + unsigned int input1_trained:1; + unsigned int pad:6; +} __attribute__((packed)); + +/** Returns a struct intel_sdvo_output_flags of active outputs. */ +#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 + +/** + * Sets the current set of active outputs. + * + * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP + * on multi-output devices. + */ +#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 + +/** + * Returns the current mapping of SDVO inputs to outputs on the device. + * + * Returns two struct intel_sdvo_output_flags structures. 
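The register map above (SDVO_I2C_ARG_0..7, SDVO_I2C_OPCODE, SDVO_I2C_CMD_STATUS, SDVO_I2C_RETURN_0..7) is how every SDVO_CMD_* below is carried over I2C: the arguments are written into the ARG registers (which count downwards from 0x07), the opcode is written last, the status register is polled until it leaves PENDING, and results are read back from the RETURN registers. A self-contained sketch of one such round-trip; the register file is simulated with a plain array so the example runs, whereas the driver goes through I2C byte accessors (intel_sdvo_read_byte()/intel_sdvo_write_byte()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARG_0          0x07    /* arguments grow downwards: ARG_1 = 0x06, ... */
#define OPCODE         0x08
#define CMD_STATUS     0x09
#define RETURN_0       0x0a
#define STATUS_SUCCESS 0x1
#define STATUS_PENDING 0x4

static uint8_t regs[0x20];     /* stand-in for the device's register file */

static void    write_byte(uint8_t reg, uint8_t val) { regs[reg] = val; }
static uint8_t read_byte(uint8_t reg)               { return regs[reg]; }

static bool sdvo_cmd(uint8_t opcode, const uint8_t *args, int nargs,
                     uint8_t *ret, int nret)
{
    int i;

    for (i = 0; i < nargs; i++)         /* stage the arguments */
        write_byte(ARG_0 - i, args[i]);
    write_byte(OPCODE, opcode);         /* kick off the command */

    while (read_byte(CMD_STATUS) == STATUS_PENDING)
        ;                               /* a real caller bounds this poll loop */

    if (read_byte(CMD_STATUS) != STATUS_SUCCESS)
        return false;

    for (i = 0; i < nret; i++)          /* collect the reply bytes */
        ret[i] = read_byte(RETURN_0 + i);
    return true;
}

int main(void)
{
    uint8_t caps[8] = { 0 };

    regs[CMD_STATUS] = STATUS_SUCCESS;  /* pretend the device answered */
    if (sdvo_cmd(0x02 /* GET_DEVICE_CAPS */, NULL, 0, caps, 8))
        printf("vendor %02x device %02x\n", caps[0], caps[1]);
    return 0;
}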
+ */ +#define SDVO_CMD_GET_IN_OUT_MAP 0x06 +struct intel_sdvo_in_out_map { + u16 in0, in1; +}; + +/** + * Sets the current mapping of SDVO inputs to outputs on the device. + * + * Takes two struct i380_sdvo_output_flags structures. + */ +#define SDVO_CMD_SET_IN_OUT_MAP 0x07 + +/** + * Returns a struct intel_sdvo_output_flags of attached displays. + */ +#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b + +/** + * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging. + */ +#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c + +/** + * Takes a struct intel_sdvo_output_flags. + */ +#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d + +/** + * Returns a struct intel_sdvo_output_flags of displays with hot plug + * interrupts enabled. + */ +#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e + +#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f +struct intel_sdvo_get_interrupt_event_source_response { + u16 interrupt_status; + unsigned int ambient_light_interrupt:1; + unsigned int hdmi_audio_encrypt_change:1; + unsigned int pad:6; +} __attribute__((packed)); + +/** + * Selects which input is affected by future input commands. + * + * Commands affected include SET_INPUT_TIMINGS_PART[12], + * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12], + * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS. + */ +#define SDVO_CMD_SET_TARGET_INPUT 0x10 +struct intel_sdvo_set_target_input_args { + unsigned int target_1:1; + unsigned int pad:7; +} __attribute__((packed)); + +/** + * Takes a struct intel_sdvo_output_flags of which outputs are targeted by + * future output commands. + * + * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12], + * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE. + */ +#define SDVO_CMD_SET_TARGET_OUTPUT 0x11 + +#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12 +#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13 +#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14 +#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15 +#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16 +#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17 +#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18 +#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19 +/* Part 1 */ +# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0 +# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1 +# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2 +# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3 +# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4 +# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5 +# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6 +# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7 +/* Part 2 */ +# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0 +# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1 +# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2 +# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3 +# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4 +# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7) +# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5) +# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3) +# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1) +# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5 +# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7) +# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6) +# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6) +# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4) +# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4) +# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4) +# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) +# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 + +/** + * Generates a DTD based on the given width, height, and flags. 
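The *_high fields in struct intel_sdvo_dtd (and the SDVO_DTD_*_HIGH argument registers above) split each 12-bit timing value the same way the EDID detailed timing descriptor does: the low 8 bits go in their own byte and the upper 4 bits of the active and blanking lengths share one byte, active bits in the high nibble. A small packing sketch under that assumption about the nibble order:

#include <stdint.h>
#include <stdio.h>

/* Pack active/blanking lengths into the DTD part1 layout described above. */
struct dtd_part1 {
    uint16_t clock;                       /* pixel clock, 10 kHz units */
    uint8_t h_active, h_blank, h_high;
    uint8_t v_active, v_blank, v_high;
};

static void pack_part1(struct dtd_part1 *p, int khz,
                       int hactive, int hblank, int vactive, int vblank)
{
    p->clock    = khz / 10;
    p->h_active = hactive & 0xff;
    p->h_blank  = hblank & 0xff;
    p->h_high   = (((hactive >> 8) & 0xf) << 4) | ((hblank >> 8) & 0xf);
    p->v_active = vactive & 0xff;
    p->v_blank  = vblank & 0xff;
    p->v_high   = (((vactive >> 8) & 0xf) << 4) | ((vblank >> 8) & 0xf);
}

int main(void)
{
    struct dtd_part1 p;

    /* 1920x1080@60: 148.5 MHz clock, 280 pixels / 45 lines of blanking */
    pack_part1(&p, 148500, 1920, 280, 1080, 45);
    printf("h_active %u h_high %#x\n", p.h_active, p.h_high);  /* 128, 0x71 */
    return 0;
}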
+ * + * This will be supported by any device supporting scaling or interlaced + * modes. + */ +#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a +# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 +# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 +# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 +# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 +# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 +# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 +# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 +# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) +# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) + +#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b +#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c + +/** Returns a struct intel_sdvo_pixel_clock_range */ +#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d +/** Returns a struct intel_sdvo_pixel_clock_range */ +#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e + +/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ +#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f + +/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 +/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 +# define SDVO_CLOCK_RATE_MULT_1X (1 << 0) +# define SDVO_CLOCK_RATE_MULT_2X (1 << 1) +# define SDVO_CLOCK_RATE_MULT_4X (1 << 3) + +#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 +/** 6 bytes of bit flags for TV formats shared by all TV format functions */ +struct intel_sdvo_tv_format { + unsigned int ntsc_m:1; + unsigned int ntsc_j:1; + unsigned int ntsc_443:1; + unsigned int pal_b:1; + unsigned int pal_d:1; + unsigned int pal_g:1; + unsigned int pal_h:1; + unsigned int pal_i:1; + + unsigned int pal_m:1; + unsigned int pal_n:1; + unsigned int pal_nc:1; + unsigned int pal_60:1; + unsigned int secam_b:1; + unsigned int secam_d:1; + unsigned int secam_g:1; + unsigned int secam_k:1; + + unsigned int secam_k1:1; + unsigned int secam_l:1; + unsigned int secam_60:1; + unsigned int hdtv_std_smpte_240m_1080i_59:1; + unsigned int hdtv_std_smpte_240m_1080i_60:1; + unsigned int hdtv_std_smpte_260m_1080i_59:1; + unsigned int hdtv_std_smpte_260m_1080i_60:1; + unsigned int hdtv_std_smpte_274m_1080i_50:1; + + unsigned int hdtv_std_smpte_274m_1080i_59:1; + unsigned int hdtv_std_smpte_274m_1080i_60:1; + unsigned int hdtv_std_smpte_274m_1080p_23:1; + unsigned int hdtv_std_smpte_274m_1080p_24:1; + unsigned int hdtv_std_smpte_274m_1080p_25:1; + unsigned int hdtv_std_smpte_274m_1080p_29:1; + unsigned int hdtv_std_smpte_274m_1080p_30:1; + unsigned int hdtv_std_smpte_274m_1080p_50:1; + + unsigned int hdtv_std_smpte_274m_1080p_59:1; + unsigned int hdtv_std_smpte_274m_1080p_60:1; + unsigned int hdtv_std_smpte_295m_1080i_50:1; + unsigned int hdtv_std_smpte_295m_1080p_50:1; + unsigned int hdtv_std_smpte_296m_720p_59:1; + unsigned int hdtv_std_smpte_296m_720p_60:1; + unsigned int hdtv_std_smpte_296m_720p_50:1; + unsigned int hdtv_std_smpte_293m_480p_59:1; + + unsigned int hdtv_std_smpte_170m_480i_59:1; + unsigned int hdtv_std_iturbt601_576i_50:1; + unsigned int hdtv_std_iturbt601_576p_50:1; + unsigned int hdtv_std_eia_7702a_480i_60:1; + unsigned int hdtv_std_eia_7702a_480p_60:1; + unsigned int pad:3; +} __attribute__((packed)); + +#define SDVO_CMD_GET_TV_FORMAT 0x28 + +#define SDVO_CMD_SET_TV_FORMAT 0x29 + +/** Returns the resolutiosn that can be used with the 
given TV format */ +#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 +struct intel_sdvo_sdtv_resolution_request { + unsigned int ntsc_m:1; + unsigned int ntsc_j:1; + unsigned int ntsc_443:1; + unsigned int pal_b:1; + unsigned int pal_d:1; + unsigned int pal_g:1; + unsigned int pal_h:1; + unsigned int pal_i:1; + + unsigned int pal_m:1; + unsigned int pal_n:1; + unsigned int pal_nc:1; + unsigned int pal_60:1; + unsigned int secam_b:1; + unsigned int secam_d:1; + unsigned int secam_g:1; + unsigned int secam_k:1; + + unsigned int secam_k1:1; + unsigned int secam_l:1; + unsigned int secam_60:1; + unsigned int pad:5; +} __attribute__((packed)); + +struct intel_sdvo_sdtv_resolution_reply { + unsigned int res_320x200:1; + unsigned int res_320x240:1; + unsigned int res_400x300:1; + unsigned int res_640x350:1; + unsigned int res_640x400:1; + unsigned int res_640x480:1; + unsigned int res_704x480:1; + unsigned int res_704x576:1; + + unsigned int res_720x350:1; + unsigned int res_720x400:1; + unsigned int res_720x480:1; + unsigned int res_720x540:1; + unsigned int res_720x576:1; + unsigned int res_768x576:1; + unsigned int res_800x600:1; + unsigned int res_832x624:1; + + unsigned int res_920x766:1; + unsigned int res_1024x768:1; + unsigned int res_1280x1024:1; + unsigned int pad:5; +} __attribute__((packed)); + +/* Get supported resolution with squire pixel aspect ratio that can be + scaled for the requested HDTV format */ +#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85 + +struct intel_sdvo_hdtv_resolution_request { + unsigned int hdtv_std_smpte_240m_1080i_59:1; + unsigned int hdtv_std_smpte_240m_1080i_60:1; + unsigned int hdtv_std_smpte_260m_1080i_59:1; + unsigned int hdtv_std_smpte_260m_1080i_60:1; + unsigned int hdtv_std_smpte_274m_1080i_50:1; + unsigned int hdtv_std_smpte_274m_1080i_59:1; + unsigned int hdtv_std_smpte_274m_1080i_60:1; + unsigned int hdtv_std_smpte_274m_1080p_23:1; + + unsigned int hdtv_std_smpte_274m_1080p_24:1; + unsigned int hdtv_std_smpte_274m_1080p_25:1; + unsigned int hdtv_std_smpte_274m_1080p_29:1; + unsigned int hdtv_std_smpte_274m_1080p_30:1; + unsigned int hdtv_std_smpte_274m_1080p_50:1; + unsigned int hdtv_std_smpte_274m_1080p_59:1; + unsigned int hdtv_std_smpte_274m_1080p_60:1; + unsigned int hdtv_std_smpte_295m_1080i_50:1; + + unsigned int hdtv_std_smpte_295m_1080p_50:1; + unsigned int hdtv_std_smpte_296m_720p_59:1; + unsigned int hdtv_std_smpte_296m_720p_60:1; + unsigned int hdtv_std_smpte_296m_720p_50:1; + unsigned int hdtv_std_smpte_293m_480p_59:1; + unsigned int hdtv_std_smpte_170m_480i_59:1; + unsigned int hdtv_std_iturbt601_576i_50:1; + unsigned int hdtv_std_iturbt601_576p_50:1; + + unsigned int hdtv_std_eia_7702a_480i_60:1; + unsigned int hdtv_std_eia_7702a_480p_60:1; + unsigned int pad:6; +} __attribute__((packed)); + +struct intel_sdvo_hdtv_resolution_reply { + unsigned int res_640x480:1; + unsigned int res_800x600:1; + unsigned int res_1024x768:1; + unsigned int res_1280x960:1; + unsigned int res_1400x1050:1; + unsigned int res_1600x1200:1; + unsigned int res_1920x1440:1; + unsigned int res_2048x1536:1; + + unsigned int res_2560x1920:1; + unsigned int res_3200x2400:1; + unsigned int res_3840x2880:1; + unsigned int pad1:5; + + unsigned int res_848x480:1; + unsigned int res_1064x600:1; + unsigned int res_1280x720:1; + unsigned int res_1360x768:1; + unsigned int res_1704x960:1; + unsigned int res_1864x1050:1; + unsigned int res_1920x1080:1; + unsigned int res_2128x1200:1; + + unsigned int res_2560x1400:1; + unsigned int res_2728x1536:1; + unsigned int 
res_3408x1920:1; + unsigned int res_4264x2400:1; + unsigned int res_5120x2880:1; + unsigned int pad2:3; + + unsigned int res_768x480:1; + unsigned int res_960x600:1; + unsigned int res_1152x720:1; + unsigned int res_1124x768:1; + unsigned int res_1536x960:1; + unsigned int res_1680x1050:1; + unsigned int res_1728x1080:1; + unsigned int res_1920x1200:1; + + unsigned int res_2304x1440:1; + unsigned int res_2456x1536:1; + unsigned int res_3072x1920:1; + unsigned int res_3840x2400:1; + unsigned int res_4608x2880:1; + unsigned int pad3:3; + + unsigned int res_1280x1024:1; + unsigned int pad4:7; + + unsigned int res_1280x768:1; + unsigned int pad5:7; +} __attribute__((packed)); + +/* Get supported power state returns info for encoder and monitor, rely on + last SetTargetInput and SetTargetOutput calls */ +#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a +/* Get power state returns info for encoder and monitor, rely on last + SetTargetInput and SetTargetOutput calls */ +#define SDVO_CMD_GET_POWER_STATE 0x2b +#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b +#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c +# define SDVO_ENCODER_STATE_ON (1 << 0) +# define SDVO_ENCODER_STATE_STANDBY (1 << 1) +# define SDVO_ENCODER_STATE_SUSPEND (1 << 2) +# define SDVO_ENCODER_STATE_OFF (1 << 3) +# define SDVO_MONITOR_STATE_ON (1 << 4) +# define SDVO_MONITOR_STATE_STANDBY (1 << 5) +# define SDVO_MONITOR_STATE_SUSPEND (1 << 6) +# define SDVO_MONITOR_STATE_OFF (1 << 7) + +#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d +#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e +#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f +/** + * The panel power sequencing parameters are in units of milliseconds. + * The high fields are bits 8:9 of the 10-bit values. + */ +struct sdvo_panel_power_sequencing { + u8 t0; + u8 t1; + u8 t2; + u8 t3; + u8 t4; + + unsigned int t0_high:2; + unsigned int t1_high:2; + unsigned int t2_high:2; + unsigned int t3_high:2; + + unsigned int t4_high:2; + unsigned int pad:6; +} __attribute__((packed)); + +#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30 +struct sdvo_max_backlight_reply { + u8 max_value; + u8 default_value; +} __attribute__((packed)); + +#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31 +#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32 + +#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33 +struct sdvo_get_ambient_light_reply { + u16 trip_low; + u16 trip_high; + u16 value; +} __attribute__((packed)); +#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34 +struct sdvo_set_ambient_light_reply { + u16 trip_low; + u16 trip_high; + unsigned int enable:1; + unsigned int pad:7; +} __attribute__((packed)); + +/* Set display power state */ +#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d +# define SDVO_DISPLAY_STATE_ON (1 << 0) +# define SDVO_DISPLAY_STATE_STANDBY (1 << 1) +# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2) +# define SDVO_DISPLAY_STATE_OFF (1 << 3) + +#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84 +struct intel_sdvo_enhancements_reply { + unsigned int flicker_filter:1; + unsigned int flicker_filter_adaptive:1; + unsigned int flicker_filter_2d:1; + unsigned int saturation:1; + unsigned int hue:1; + unsigned int brightness:1; + unsigned int contrast:1; + unsigned int overscan_h:1; + + unsigned int overscan_v:1; + unsigned int hpos:1; + unsigned int vpos:1; + unsigned int sharpness:1; + unsigned int dot_crawl:1; + unsigned int dither:1; + unsigned int tv_chroma_filter:1; + unsigned int tv_luma_filter:1; +} __attribute__((packed)); + +/* Picture enhancement limits below are dependent on the current TV format, + * and thus need to be 
queried and set after it. + */ +#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d +#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b +#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52 +#define SDVO_CMD_GET_MAX_SATURATION 0x55 +#define SDVO_CMD_GET_MAX_HUE 0x58 +#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b +#define SDVO_CMD_GET_MAX_CONTRAST 0x5e +#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 +#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 +#define SDVO_CMD_GET_MAX_HPOS 0x67 +#define SDVO_CMD_GET_MAX_VPOS 0x6a +#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d +#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 +#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 +struct intel_sdvo_enhancement_limits_reply { + u16 max_value; + u16 default_value; +} __attribute__((packed)); + +#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f +#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80 +# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0) +# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0) +# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2) +# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2) +# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4) +# define SDVO_LVDS_DUAL_CHANNEL (1 << 4) + +#define SDVO_CMD_GET_FLICKER_FILTER 0x4e +#define SDVO_CMD_SET_FLICKER_FILTER 0x4f +#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50 +#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51 +#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53 +#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54 +#define SDVO_CMD_GET_SATURATION 0x56 +#define SDVO_CMD_SET_SATURATION 0x57 +#define SDVO_CMD_GET_HUE 0x59 +#define SDVO_CMD_SET_HUE 0x5a +#define SDVO_CMD_GET_BRIGHTNESS 0x5c +#define SDVO_CMD_SET_BRIGHTNESS 0x5d +#define SDVO_CMD_GET_CONTRAST 0x5f +#define SDVO_CMD_SET_CONTRAST 0x60 +#define SDVO_CMD_GET_OVERSCAN_H 0x62 +#define SDVO_CMD_SET_OVERSCAN_H 0x63 +#define SDVO_CMD_GET_OVERSCAN_V 0x65 +#define SDVO_CMD_SET_OVERSCAN_V 0x66 +#define SDVO_CMD_GET_HPOS 0x68 +#define SDVO_CMD_SET_HPOS 0x69 +#define SDVO_CMD_GET_VPOS 0x6b +#define SDVO_CMD_SET_VPOS 0x6c +#define SDVO_CMD_GET_SHARPNESS 0x6e +#define SDVO_CMD_SET_SHARPNESS 0x6f +#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75 +#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76 +#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 +#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 +struct intel_sdvo_enhancements_arg { + u16 value; +} __attribute__((packed)); + +#define SDVO_CMD_GET_DOT_CRAWL 0x70 +#define SDVO_CMD_SET_DOT_CRAWL 0x71 +# define SDVO_DOT_CRAWL_ON (1 << 0) +# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1) + +#define SDVO_CMD_GET_DITHER 0x72 +#define SDVO_CMD_SET_DITHER 0x73 +# define SDVO_DITHER_ON (1 << 0) +# define SDVO_DITHER_DEFAULT_ON (1 << 1) + +#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a +# define SDVO_CONTROL_BUS_PROM (1 << 0) +# define SDVO_CONTROL_BUS_DDC1 (1 << 1) +# define SDVO_CONTROL_BUS_DDC2 (1 << 2) +# define SDVO_CONTROL_BUS_DDC3 (1 << 3) + +/* HDMI op codes */ +#define SDVO_CMD_GET_SUPP_ENCODE 0x9d +#define SDVO_CMD_GET_ENCODE 0x9e +#define SDVO_CMD_SET_ENCODE 0x9f + #define SDVO_ENCODE_DVI 0x0 + #define SDVO_ENCODE_HDMI 0x1 +#define SDVO_CMD_SET_PIXEL_REPLI 0x8b +#define SDVO_CMD_GET_PIXEL_REPLI 0x8c +#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d +#define SDVO_CMD_SET_COLORIMETRY 0x8e + #define SDVO_COLORIMETRY_RGB256 0x0 + #define SDVO_COLORIMETRY_RGB220 0x1 + #define SDVO_COLORIMETRY_YCrCb422 0x3 + #define SDVO_COLORIMETRY_YCrCb444 0x4 +#define SDVO_CMD_GET_COLORIMETRY 0x8f +#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 +#define SDVO_CMD_SET_AUDIO_STAT 0x91 +#define SDVO_CMD_GET_AUDIO_STAT 0x92 +#define SDVO_CMD_SET_HBUF_INDEX 0x93 + #define SDVO_HBUF_INDEX_ELD 0 + #define 
SDVO_HBUF_INDEX_AVI_IF 1 +#define SDVO_CMD_GET_HBUF_INDEX 0x94 +#define SDVO_CMD_GET_HBUF_INFO 0x95 +#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 +#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97 +#define SDVO_CMD_SET_HBUF_DATA 0x98 +#define SDVO_CMD_GET_HBUF_DATA 0x99 +#define SDVO_CMD_SET_HBUF_TXRATE 0x9a +#define SDVO_CMD_GET_HBUF_TXRATE 0x9b + #define SDVO_HBUF_TX_DISABLED (0 << 6) + #define SDVO_HBUF_TX_ONCE (2 << 6) + #define SDVO_HBUF_TX_VSYNC (3 << 6) +#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c +#define SDVO_NEED_TO_STALL (1 << 7) + +struct intel_sdvo_encode { + u8 dvi_rev; + u8 hdmi_rev; +} __attribute__ ((packed)); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c new file mode 100644 index 0000000..c7d25c5 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -0,0 +1,963 @@ +/* + * Copyright © 2011 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Jesse Barnes + * + * New plane/sprite handling. + * + * The older chips had a separate interface for programming plane related + * registers; newer ones are much simpler and we can use the new DRM plane + * support. 
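Concretely, that split shows up below as a small set of per-generation hooks hung off struct intel_plane (declared in intel_drv.h): ILK/SNB program the DVS registers, IVB/HSW the SPR registers, and VLV the SP registers, all behind the same function pointers. A reduced sketch of that shape; the types are simplified here and the real hooks take drm_plane, framebuffer and GEM object pointers:

#include <stdbool.h>

/* Reduced shape of the per-generation sprite hooks this file fills in. */
struct sprite_hooks {
    bool can_scale;
    int max_downscale;
    void (*update_plane)(void);         /* ilk_/ivb_/vlv_update_plane */
    void (*disable_plane)(void);        /* ilk_/ivb_/vlv_disable_plane */
};

/* Mirrors the generation switch in intel_plane_init() at the end of the
 * file: ILK/SNB scale up to 16x, IVB up to 2x, HSW and VLV not at all. */
static void pick_scaling(struct sprite_hooks *h, int gen, bool vlv, bool hsw)
{
    if (gen == 5 || gen == 6) {
        h->can_scale = true;
        h->max_downscale = 16;
    } else if (vlv) {
        h->can_scale = false;
        h->max_downscale = 1;
    } else {
        h->can_scale = !hsw;
        h->max_downscale = 2;
    }
}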
+ */ +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +static void +vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t x, uint32_t y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = dplane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(dplane); + int pipe = intel_plane->pipe; + int plane = intel_plane->plane; + u32 sprctl; + unsigned long sprsurf_offset, linear_offset; + int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + + sprctl = I915_READ(SPCNTR(pipe, plane)); + + /* Mask out pixel format bits in case we change it */ + sprctl &= ~SP_PIXFORMAT_MASK; + sprctl &= ~SP_YUV_BYTE_ORDER_MASK; + sprctl &= ~SP_TILED; + + switch (fb->pixel_format) { + case DRM_FORMAT_YUYV: + sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV; + break; + case DRM_FORMAT_YVYU: + sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU; + break; + case DRM_FORMAT_UYVY: + sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY; + break; + case DRM_FORMAT_VYUY: + sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY; + break; + case DRM_FORMAT_RGB565: + sprctl |= SP_FORMAT_BGR565; + break; + case DRM_FORMAT_XRGB8888: + sprctl |= SP_FORMAT_BGRX8888; + break; + case DRM_FORMAT_ARGB8888: + sprctl |= SP_FORMAT_BGRA8888; + break; + case DRM_FORMAT_XBGR2101010: + sprctl |= SP_FORMAT_RGBX1010102; + break; + case DRM_FORMAT_ABGR2101010: + sprctl |= SP_FORMAT_RGBA1010102; + break; + case DRM_FORMAT_XBGR8888: + sprctl |= SP_FORMAT_RGBX8888; + break; + case DRM_FORMAT_ABGR8888: + sprctl |= SP_FORMAT_RGBA8888; + break; + default: + /* + * If we get here one of the upper layers failed to filter + * out the unsupported plane formats + */ + BUG(); + break; + } + + if (obj->tiling_mode != I915_TILING_NONE) + sprctl |= SP_TILED; + + sprctl |= SP_ENABLE; + + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; + + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); + + I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); + I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); + + linear_offset = y * fb->pitches[0] + x * pixel_size; + sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, + obj->tiling_mode, + pixel_size, + fb->pitches[0]); + linear_offset -= sprsurf_offset; + + if (obj->tiling_mode != I915_TILING_NONE) + I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); + else + I915_WRITE(SPLINOFF(pipe, plane), linear_offset); + + I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); + I915_WRITE(SPCNTR(pipe, plane), sprctl); + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + + sprsurf_offset); + POSTING_READ(SPSURF(pipe, plane)); +} + +static void +vlv_disable_plane(struct drm_plane *dplane) +{ + struct drm_device *dev = dplane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(dplane); + int pipe = intel_plane->pipe; + int plane = intel_plane->plane; + + I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & + ~SP_ENABLE); + /* Activate double buffered register update */ + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); + POSTING_READ(SPSURF(pipe, plane)); +} + +static int +vlv_update_colorkey(struct drm_plane *dplane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = dplane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane 
*intel_plane = to_intel_plane(dplane); + int pipe = intel_plane->pipe; + int plane = intel_plane->plane; + u32 sprctl; + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + return -EINVAL; + + I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); + I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); + I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask); + + sprctl = I915_READ(SPCNTR(pipe, plane)); + sprctl &= ~SP_SOURCE_KEY; + if (key->flags & I915_SET_COLORKEY_SOURCE) + sprctl |= SP_SOURCE_KEY; + I915_WRITE(SPCNTR(pipe, plane), sprctl); + + POSTING_READ(SPKEYMSK(pipe, plane)); + + return 0; +} + +static void +vlv_get_colorkey(struct drm_plane *dplane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = dplane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(dplane); + int pipe = intel_plane->pipe; + int plane = intel_plane->plane; + u32 sprctl; + + key->min_value = I915_READ(SPKEYMINVAL(pipe, plane)); + key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane)); + key->channel_mask = I915_READ(SPKEYMSK(pipe, plane)); + + sprctl = I915_READ(SPCNTR(pipe, plane)); + if (sprctl & SP_SOURCE_KEY) + key->flags = I915_SET_COLORKEY_SOURCE; + else + key->flags = I915_SET_COLORKEY_NONE; +} + +static void +ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t x, uint32_t y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + int pipe = intel_plane->pipe; + u32 sprctl, sprscale = 0; + unsigned long sprsurf_offset, linear_offset; + int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; + + sprctl = I915_READ(SPRCTL(pipe)); + + /* Mask out pixel format bits in case we change it */ + sprctl &= ~SPRITE_PIXFORMAT_MASK; + sprctl &= ~SPRITE_RGB_ORDER_RGBX; + sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; + sprctl &= ~SPRITE_TILED; + + switch (fb->pixel_format) { + case DRM_FORMAT_XBGR8888: + sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + break; + case DRM_FORMAT_XRGB8888: + sprctl |= SPRITE_FORMAT_RGBX888; + break; + case DRM_FORMAT_YUYV: + sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; + break; + case DRM_FORMAT_YVYU: + sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; + break; + case DRM_FORMAT_UYVY: + sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; + break; + case DRM_FORMAT_VYUY: + sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; + break; + default: + BUG(); + } + + if (obj->tiling_mode != I915_TILING_NONE) + sprctl |= SPRITE_TILED; + + /* must disable */ + sprctl |= SPRITE_TRICKLE_FEED_DISABLE; + sprctl |= SPRITE_ENABLE; + + if (IS_HASWELL(dev)) + sprctl |= SPRITE_PIPE_CSC_ENABLE; + + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; + + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); + + /* + * IVB workaround: must disable low power watermarks for at least + * one frame before enabling scaling. LP watermarks can be re-enabled + * when scaling is disabled. 
+ */ + if (crtc_w != src_w || crtc_h != src_h) { + dev_priv->sprite_scaling_enabled |= 1 << pipe; + + if (!scaling_was_enabled) { + intel_update_watermarks(dev); + intel_wait_for_vblank(dev, pipe); + } + sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; + } else + dev_priv->sprite_scaling_enabled &= ~(1 << pipe); + + I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); + I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); + + linear_offset = y * fb->pitches[0] + x * pixel_size; + sprsurf_offset = + intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, + pixel_size, fb->pitches[0]); + linear_offset -= sprsurf_offset; + + /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET + * register */ + if (IS_HASWELL(dev)) + I915_WRITE(SPROFFSET(pipe), (y << 16) | x); + else if (obj->tiling_mode != I915_TILING_NONE) + I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); + else + I915_WRITE(SPRLINOFF(pipe), linear_offset); + + I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + if (intel_plane->can_scale) + I915_WRITE(SPRSCALE(pipe), sprscale); + I915_WRITE(SPRCTL(pipe), sprctl); + I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); + POSTING_READ(SPRSURF(pipe)); + + /* potentially re-enable LP watermarks */ + if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) + intel_update_watermarks(dev); +} + +static void +ivb_disable_plane(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + int pipe = intel_plane->pipe; + bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; + + I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); + /* Can't leave the scaler enabled... */ + if (intel_plane->can_scale) + I915_WRITE(SPRSCALE(pipe), 0); + /* Activate double buffered register update */ + I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); + POSTING_READ(SPRSURF(pipe)); + + dev_priv->sprite_scaling_enabled &= ~(1 << pipe); + + /* potentially re-enable LP watermarks */ + if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) + intel_update_watermarks(dev); +} + +static int +ivb_update_colorkey(struct drm_plane *plane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane; + u32 sprctl; + int ret = 0; + + intel_plane = to_intel_plane(plane); + + I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value); + I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value); + I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask); + + sprctl = I915_READ(SPRCTL(intel_plane->pipe)); + sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY); + if (key->flags & I915_SET_COLORKEY_DESTINATION) + sprctl |= SPRITE_DEST_KEY; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + sprctl |= SPRITE_SOURCE_KEY; + I915_WRITE(SPRCTL(intel_plane->pipe), sprctl); + + POSTING_READ(SPRKEYMSK(intel_plane->pipe)); + + return ret; +} + +static void +ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane; + u32 sprctl; + + intel_plane = to_intel_plane(plane); + + key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe)); + key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe)); + key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe)); + key->flags = 0; + + sprctl = 
I915_READ(SPRCTL(intel_plane->pipe)); + + if (sprctl & SPRITE_DEST_KEY) + key->flags = I915_SET_COLORKEY_DESTINATION; + else if (sprctl & SPRITE_SOURCE_KEY) + key->flags = I915_SET_COLORKEY_SOURCE; + else + key->flags = I915_SET_COLORKEY_NONE; +} + +static void +ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t x, uint32_t y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + int pipe = intel_plane->pipe; + unsigned long dvssurf_offset, linear_offset; + u32 dvscntr, dvsscale; + int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + + dvscntr = I915_READ(DVSCNTR(pipe)); + + /* Mask out pixel format bits in case we change it */ + dvscntr &= ~DVS_PIXFORMAT_MASK; + dvscntr &= ~DVS_RGB_ORDER_XBGR; + dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; + dvscntr &= ~DVS_TILED; + + switch (fb->pixel_format) { + case DRM_FORMAT_XBGR8888: + dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; + break; + case DRM_FORMAT_XRGB8888: + dvscntr |= DVS_FORMAT_RGBX888; + break; + case DRM_FORMAT_YUYV: + dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; + break; + case DRM_FORMAT_YVYU: + dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; + break; + case DRM_FORMAT_UYVY: + dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; + break; + case DRM_FORMAT_VYUY: + dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; + break; + default: + BUG(); + } + + if (obj->tiling_mode != I915_TILING_NONE) + dvscntr |= DVS_TILED; + + if (IS_GEN6(dev)) + dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ + dvscntr |= DVS_ENABLE; + + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; + + intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); + + dvsscale = 0; + if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) + dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; + + I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); + I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); + + linear_offset = y * fb->pitches[0] + x * pixel_size; + dvssurf_offset = + intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, + pixel_size, fb->pitches[0]); + linear_offset -= dvssurf_offset; + + if (obj->tiling_mode != I915_TILING_NONE) + I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); + else + I915_WRITE(DVSLINOFF(pipe), linear_offset); + + I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + I915_WRITE(DVSSCALE(pipe), dvsscale); + I915_WRITE(DVSCNTR(pipe), dvscntr); + I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); + POSTING_READ(DVSSURF(pipe)); +} + +static void +ilk_disable_plane(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + int pipe = intel_plane->pipe; + + I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE); + /* Disable the scaler */ + I915_WRITE(DVSSCALE(pipe), 0); + /* Flush double buffered register updates */ + I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); + POSTING_READ(DVSSURF(pipe)); +} + +static void +intel_enable_primary(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int reg = DSPCNTR(intel_crtc->plane); + + if (!intel_crtc->primary_disabled) + return; + + 
intel_crtc->primary_disabled = false; + intel_update_fbc(dev); + + I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); +} + +static void +intel_disable_primary(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int reg = DSPCNTR(intel_crtc->plane); + + if (intel_crtc->primary_disabled) + return; + + I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); + + intel_crtc->primary_disabled = true; + intel_update_fbc(dev); +} + +static int +ilk_update_colorkey(struct drm_plane *plane, + struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane; + u32 dvscntr; + int ret = 0; + + intel_plane = to_intel_plane(plane); + + I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value); + I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value); + I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask); + + dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); + dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY); + if (key->flags & I915_SET_COLORKEY_DESTINATION) + dvscntr |= DVS_DEST_KEY; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + dvscntr |= DVS_SOURCE_KEY; + I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr); + + POSTING_READ(DVSKEYMSK(intel_plane->pipe)); + + return ret; +} + +static void +ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane; + u32 dvscntr; + + intel_plane = to_intel_plane(plane); + + key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe)); + key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe)); + key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe)); + key->flags = 0; + + dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); + + if (dvscntr & DVS_DEST_KEY) + key->flags = I915_SET_COLORKEY_DESTINATION; + else if (dvscntr & DVS_SOURCE_KEY) + key->flags = I915_SET_COLORKEY_SOURCE; + else + key->flags = I915_SET_COLORKEY_NONE; +} + +static int +intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj, *old_obj; + int pipe = intel_plane->pipe; + enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, + pipe); + int ret = 0; + int x = src_x >> 16, y = src_y >> 16; + int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; + bool disable_primary = false; + + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + + old_obj = intel_plane->obj; + + intel_plane->crtc_x = crtc_x; + intel_plane->crtc_y = crtc_y; + intel_plane->crtc_w = crtc_w; + intel_plane->crtc_h = crtc_h; + intel_plane->src_x = src_x; + intel_plane->src_y = src_y; + intel_plane->src_w = src_w; + intel_plane->src_h = src_h; + + src_w = src_w >> 16; + src_h = src_h >> 16; + + /* Pipe must be running... 
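The >> 16 shifts just above reflect the DRM plane UAPI: the src_x/src_y/src_w/src_h values arrive as 16.16 fixed-point source coordinates, while the crtc_* destination values are plain integer pixels, so the driver keeps only the integer part. A tiny self-contained illustration of that conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t to_fixed(uint32_t pixels) { return pixels << 16; }   /* 16.16 */
static uint32_t to_pixels(uint32_t fixed) { return fixed >> 16; }

int main(void)
{
    uint32_t src_w = to_fixed(1920);    /* what user space hands in */

    printf("%u pixels (0x%08x fixed)\n", to_pixels(src_w), src_w);
    return 0;
}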
*/ + if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) + return -EINVAL; + + if (crtc_x >= primary_w || crtc_y >= primary_h) + return -EINVAL; + + /* Don't modify another pipe's plane */ + if (intel_plane->pipe != intel_crtc->pipe) + return -EINVAL; + + /* Sprite planes can be linear or x-tiled surfaces */ + switch (obj->tiling_mode) { + case I915_TILING_NONE: + case I915_TILING_X: + break; + default: + return -EINVAL; + } + + /* + * Clamp the width & height into the visible area. Note we don't + * try to scale the source if part of the visible region is offscreen. + * The caller must handle that by adjusting source offset and size. + */ + if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) { + crtc_w += crtc_x; + crtc_x = 0; + } + if ((crtc_x + crtc_w) <= 0) /* Nothing to display */ + goto out; + if ((crtc_x + crtc_w) > primary_w) + crtc_w = primary_w - crtc_x; + + if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) { + crtc_h += crtc_y; + crtc_y = 0; + } + if ((crtc_y + crtc_h) <= 0) /* Nothing to display */ + goto out; + if (crtc_y + crtc_h > primary_h) + crtc_h = primary_h - crtc_y; + + if (!crtc_w || !crtc_h) /* Again, nothing to display */ + goto out; + + /* + * We may not have a scaler, eg. HSW does not have it any more + */ + if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h)) + return -EINVAL; + + /* + * We can take a larger source and scale it down, but + * only so much... 16x is the max on SNB. + */ + if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale) + return -EINVAL; + + /* + * If the sprite is completely covering the primary plane, + * we can disable the primary and save power. + */ + if ((crtc_x == 0) && (crtc_y == 0) && + (crtc_w == primary_w) && (crtc_h == primary_h)) + disable_primary = true; + + mutex_lock(&dev->struct_mutex); + + /* Note that this will apply the VT-d workaround for scanouts, + * which is more restrictive than required for sprites. (The + * primary plane requires 256KiB alignment with 64 PTE padding, + * the sprite planes only require 128KiB alignment and 32 PTE padding. + */ + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret) + goto out_unlock; + + intel_plane->obj = obj; + + /* + * Be sure to re-enable the primary before the sprite is no longer + * covering it fully. + */ + if (!disable_primary) + intel_enable_primary(crtc); + + intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y, + crtc_w, crtc_h, x, y, src_w, src_h); + + if (disable_primary) + intel_disable_primary(crtc); + + /* Unpin old obj after new one is active to avoid ugliness */ + if (old_obj) { + /* + * It's fairly common to simply update the position of + * an existing object. In that case, we don't need to + * wait for vblank to avoid ugliness, we only need to + * do the pin & ref bookkeeping. 
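The downscale test above is an area ratio: with max_downscale = 16 on ILK/SNB, a source may shrink to no less than 1/16 of its area (IVB allows 2x, VLV none). A short worked check under that reading of the comparison; the helper name is made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Area-based downscale check matching the test above:
 * (src_w * src_h) / (crtc_w * crtc_h) must not exceed max_downscale. */
static bool downscale_ok(int src_w, int src_h, int crtc_w, int crtc_h,
                         int max_downscale)
{
    return (src_w * src_h) / (crtc_w * crtc_h) <= max_downscale;
}

int main(void)
{
    /* 1920x1080 -> 480x270 is exactly a 16x area reduction: allowed on SNB */
    printf("%d\n", downscale_ok(1920, 1080, 480, 270, 16));
    /* 1920x1080 -> 400x225 is a 23x reduction: rejected */
    printf("%d\n", downscale_ok(1920, 1080, 400, 225, 16));
    return 0;
}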
+ */ + if (old_obj != obj) { + mutex_unlock(&dev->struct_mutex); + intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); + mutex_lock(&dev->struct_mutex); + } + intel_unpin_fb_obj(old_obj); + } + +out_unlock: + mutex_unlock(&dev->struct_mutex); +out: + return ret; +} + +static int +intel_disable_plane(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct intel_plane *intel_plane = to_intel_plane(plane); + int ret = 0; + + if (plane->crtc) + intel_enable_primary(plane->crtc); + intel_plane->disable_plane(plane); + + if (!intel_plane->obj) + goto out; + + intel_wait_for_vblank(dev, intel_plane->pipe); + + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(intel_plane->obj); + intel_plane->obj = NULL; + mutex_unlock(&dev->struct_mutex); +out: + + return ret; +} + +static void intel_destroy_plane(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + intel_disable_plane(plane); + drm_plane_cleanup(plane); + kfree(intel_plane); +} + +int intel_sprite_set_colorkey(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_sprite_colorkey *set = data; + struct drm_mode_object *obj; + struct drm_plane *plane; + struct intel_plane *intel_plane; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + /* Make sure we don't try to enable both src & dest simultaneously */ + if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) + return -EINVAL; + + drm_modeset_lock_all(dev); + + obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); + if (!obj) { + ret = -EINVAL; + goto out_unlock; + } + + plane = obj_to_plane(obj); + intel_plane = to_intel_plane(plane); + ret = intel_plane->update_colorkey(plane, set); + +out_unlock: + drm_modeset_unlock_all(dev); + return ret; +} + +int intel_sprite_get_colorkey(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_intel_sprite_colorkey *get = data; + struct drm_mode_object *obj; + struct drm_plane *plane; + struct intel_plane *intel_plane; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + + drm_modeset_lock_all(dev); + + obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); + if (!obj) { + ret = -EINVAL; + goto out_unlock; + } + + plane = obj_to_plane(obj); + intel_plane = to_intel_plane(plane); + intel_plane->get_colorkey(plane, get); + +out_unlock: + drm_modeset_unlock_all(dev); + return ret; +} + +void intel_plane_restore(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + + if (!plane->crtc || !plane->fb) + return; + + intel_update_plane(plane, plane->crtc, plane->fb, + intel_plane->crtc_x, intel_plane->crtc_y, + intel_plane->crtc_w, intel_plane->crtc_h, + intel_plane->src_x, intel_plane->src_y, + intel_plane->src_w, intel_plane->src_h); +} + +static const struct drm_plane_funcs intel_plane_funcs = { + .update_plane = intel_update_plane, + .disable_plane = intel_disable_plane, + .destroy = intel_destroy_plane, +}; + +static uint32_t ilk_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + +static uint32_t snb_plane_formats[] = { + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + +static uint32_t vlv_plane_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_ABGR8888, + 
DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, +}; + +int +intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) +{ + struct intel_plane *intel_plane; + unsigned long possible_crtcs; + const uint32_t *plane_formats; + int num_plane_formats; + int ret; + + if (INTEL_INFO(dev)->gen < 5) + return -ENODEV; + + intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); + if (!intel_plane) + return -ENOMEM; + + switch (INTEL_INFO(dev)->gen) { + case 5: + case 6: + intel_plane->can_scale = true; + intel_plane->max_downscale = 16; + intel_plane->update_plane = ilk_update_plane; + intel_plane->disable_plane = ilk_disable_plane; + intel_plane->update_colorkey = ilk_update_colorkey; + intel_plane->get_colorkey = ilk_get_colorkey; + + if (IS_GEN6(dev)) { + plane_formats = snb_plane_formats; + num_plane_formats = ARRAY_SIZE(snb_plane_formats); + } else { + plane_formats = ilk_plane_formats; + num_plane_formats = ARRAY_SIZE(ilk_plane_formats); + } + break; + + case 7: + if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) + intel_plane->can_scale = false; + else + intel_plane->can_scale = true; + + if (IS_VALLEYVIEW(dev)) { + intel_plane->max_downscale = 1; + intel_plane->update_plane = vlv_update_plane; + intel_plane->disable_plane = vlv_disable_plane; + intel_plane->update_colorkey = vlv_update_colorkey; + intel_plane->get_colorkey = vlv_get_colorkey; + + plane_formats = vlv_plane_formats; + num_plane_formats = ARRAY_SIZE(vlv_plane_formats); + } else { + intel_plane->max_downscale = 2; + intel_plane->update_plane = ivb_update_plane; + intel_plane->disable_plane = ivb_disable_plane; + intel_plane->update_colorkey = ivb_update_colorkey; + intel_plane->get_colorkey = ivb_get_colorkey; + + plane_formats = snb_plane_formats; + num_plane_formats = ARRAY_SIZE(snb_plane_formats); + } + break; + + default: + kfree(intel_plane); + return -ENODEV; + } + + intel_plane->pipe = pipe; + intel_plane->plane = plane; + possible_crtcs = (1 << pipe); + ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + false); + if (ret) + kfree(intel_plane); + + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c new file mode 100644 index 0000000..a202d8d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -0,0 +1,1681 @@ +/* + * Copyright © 2006-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +/** @file + * Integrated TV-out support for the 915GM and 945GM. + */ + +#include +#include +#include +#include "intel_drv.h" +#include +#include "i915_drv.h" + +enum tv_margin { + TV_MARGIN_LEFT, TV_MARGIN_TOP, + TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM +}; + +/** Private structure for the integrated TV support */ +struct intel_tv { + struct intel_encoder base; + + int type; + const char *tv_format; + int margin[4]; + u32 save_TV_H_CTL_1; + u32 save_TV_H_CTL_2; + u32 save_TV_H_CTL_3; + u32 save_TV_V_CTL_1; + u32 save_TV_V_CTL_2; + u32 save_TV_V_CTL_3; + u32 save_TV_V_CTL_4; + u32 save_TV_V_CTL_5; + u32 save_TV_V_CTL_6; + u32 save_TV_V_CTL_7; + u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3; + + u32 save_TV_CSC_Y; + u32 save_TV_CSC_Y2; + u32 save_TV_CSC_U; + u32 save_TV_CSC_U2; + u32 save_TV_CSC_V; + u32 save_TV_CSC_V2; + u32 save_TV_CLR_KNOBS; + u32 save_TV_CLR_LEVEL; + u32 save_TV_WIN_POS; + u32 save_TV_WIN_SIZE; + u32 save_TV_FILTER_CTL_1; + u32 save_TV_FILTER_CTL_2; + u32 save_TV_FILTER_CTL_3; + + u32 save_TV_H_LUMA[60]; + u32 save_TV_H_CHROMA[60]; + u32 save_TV_V_LUMA[43]; + u32 save_TV_V_CHROMA[43]; + + u32 save_TV_DAC; + u32 save_TV_CTL; +}; + +struct video_levels { + int blank, black, burst; +}; + +struct color_conversion { + u16 ry, gy, by, ay; + u16 ru, gu, bu, au; + u16 rv, gv, bv, av; +}; + +static const u32 filter_table[] = { + 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, + 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, + 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, + 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, + 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, + 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, + 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, + 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, + 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, + 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, + 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, + 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, + 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, + 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, + 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, + 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140, + 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000, + 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160, + 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780, + 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50, + 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20, + 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0, + 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0, + 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020, + 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140, + 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20, + 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848, + 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900, + 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080, + 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060, + 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0, + 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, + 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, + 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, + 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, + 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, + 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, + 0x2EC03C80, 
0x320029C0, 0x3D403080, 0x29402F00, + 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, + 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, + 0x28003100, 0x28002F00, 0x00003100, 0x36403000, + 0x2D002CC0, 0x30003640, 0x2D0036C0, + 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540, + 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00, + 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000, + 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00, + 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40, + 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240, + 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00, + 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0, + 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840, + 0x28003100, 0x28002F00, 0x00003100, +}; + +/* + * Color conversion values have 3 separate fixed point formats: + * + * 10 bit fields (ay, au) + * 1.9 fixed point (b.bbbbbbbbb) + * 11 bit fields (ry, by, ru, gu, gv) + * exp.mantissa (ee.mmmmmmmmm) + * ee = 00 = 10^-1 (0.mmmmmmmmm) + * ee = 01 = 10^-2 (0.0mmmmmmmmm) + * ee = 10 = 10^-3 (0.00mmmmmmmmm) + * ee = 11 = 10^-4 (0.000mmmmmmmmm) + * 12 bit fields (gy, rv, bu) + * exp.mantissa (eee.mmmmmmmmm) + * eee = 000 = 10^-1 (0.mmmmmmmmm) + * eee = 001 = 10^-2 (0.0mmmmmmmmm) + * eee = 010 = 10^-3 (0.00mmmmmmmmm) + * eee = 011 = 10^-4 (0.000mmmmmmmmm) + * eee = 100 = reserved + * eee = 101 = reserved + * eee = 110 = reserved + * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation) + * + * Saturation and contrast are 8 bits, with their own representation: + * 8 bit field (saturation, contrast) + * exp.mantissa (ee.mmmmmm) + * ee = 00 = 10^-1 (0.mmmmmm) + * ee = 01 = 10^0 (m.mmmmm) + * ee = 10 = 10^1 (mm.mmmm) + * ee = 11 = 10^2 (mmm.mmm) + * + * Simple conversion function: + * + * static u32 + * float_to_csc_11(float f) + * { + * u32 exp; + * u32 mant; + * u32 ret; + * + * if (f < 0) + * f = -f; + * + * if (f >= 1) { + * exp = 0x7; + * mant = 1 << 8; + * } else { + * for (exp = 0; exp < 3 && f < 0.5; exp++) + * f *= 2.0; + * mant = (f * (1 << 9) + 0.5); + * if (mant >= (1 << 9)) + * mant = (1 << 9) - 1; + * } + * ret = (exp << 9) | mant; + * return ret; + * } + */ + +/* + * Behold, magic numbers! If we plant them they might grow a big + * s-video cable to the sky... or something. + * + * Pre-converted to appropriate hex value. 
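+ *
+ * A hedged worked check of the encoding above (reading the field layouts
+ * exactly as documented): the NTSC-M composite luma row further below,
+ * ntsc_m_csc_composite, appears to decode as
+ *   .ry = 0x0332 -> ee=01,   mant=0b100110010=306 -> 306/1024 ~= 0.299
+ *   .gy = 0x012d -> eee=000, mant=0b100101101=301 -> 301/512  ~= 0.587
+ *   .by = 0x07d3 -> ee=11,   mant=0b111010011=467 -> 467/4096 ~= 0.114
+ * i.e. roughly the BT.601 luma coefficients, which is what one would
+ * expect for an RGB->Y conversion.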
+ */ + +/* + * PAL & NTSC values for composite & s-video connections + */ +static const struct color_conversion ntsc_m_csc_composite = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, +}; + +static const struct video_levels ntsc_m_levels_composite = { + .blank = 225, .black = 267, .burst = 113, +}; + +static const struct color_conversion ntsc_m_csc_svideo = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, +}; + +static const struct video_levels ntsc_m_levels_svideo = { + .blank = 266, .black = 316, .burst = 133, +}; + +static const struct color_conversion ntsc_j_csc_composite = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, + .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, + .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, +}; + +static const struct video_levels ntsc_j_levels_composite = { + .blank = 225, .black = 225, .burst = 113, +}; + +static const struct color_conversion ntsc_j_csc_svideo = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, + .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, + .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, +}; + +static const struct video_levels ntsc_j_levels_svideo = { + .blank = 266, .black = 266, .burst = 133, +}; + +static const struct color_conversion pal_csc_composite = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, + .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, + .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, +}; + +static const struct video_levels pal_levels_composite = { + .blank = 237, .black = 237, .burst = 118, +}; + +static const struct color_conversion pal_csc_svideo = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, + .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, + .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, +}; + +static const struct video_levels pal_levels_svideo = { + .blank = 280, .black = 280, .burst = 139, +}; + +static const struct color_conversion pal_m_csc_composite = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, +}; + +static const struct video_levels pal_m_levels_composite = { + .blank = 225, .black = 267, .burst = 113, +}; + +static const struct color_conversion pal_m_csc_svideo = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, +}; + +static const struct video_levels pal_m_levels_svideo = { + .blank = 266, .black = 316, .burst = 133, +}; + +static const struct color_conversion pal_n_csc_composite = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, + .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, + .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, +}; + +static const struct video_levels pal_n_levels_composite = { + .blank = 225, .black = 267, .burst = 118, +}; + +static const struct color_conversion pal_n_csc_svideo = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, + .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, + .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, +}; + +static const struct video_levels pal_n_levels_svideo = { + .blank = 266, .black = 316, 
.burst = 139, +}; + +/* + * Component connections + */ +static const struct color_conversion sdtv_csc_yprpb = { + .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, + .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, +}; + +static const struct color_conversion sdtv_csc_rgb = { + .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, + .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, + .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, +}; + +static const struct color_conversion hdtv_csc_yprpb = { + .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, + .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, + .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, +}; + +static const struct color_conversion hdtv_csc_rgb = { + .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, + .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, + .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, +}; + +static const struct video_levels component_levels = { + .blank = 279, .black = 279, .burst = 0, +}; + + +struct tv_mode { + const char *name; + int clock; + int refresh; /* in millihertz (for precision) */ + u32 oversample; + int hsync_end, hblank_start, hblank_end, htotal; + bool progressive, trilevel_sync, component_only; + int vsync_start_f1, vsync_start_f2, vsync_len; + bool veq_ena; + int veq_start_f1, veq_start_f2, veq_len; + int vi_end_f1, vi_end_f2, nbr_end; + bool burst_ena; + int hburst_start, hburst_len; + int vburst_start_f1, vburst_end_f1; + int vburst_start_f2, vburst_end_f2; + int vburst_start_f3, vburst_end_f3; + int vburst_start_f4, vburst_end_f4; + /* + * subcarrier programming + */ + int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc; + u32 sc_reset; + bool pal_burst; + /* + * blank/black levels + */ + const struct video_levels *composite_levels, *svideo_levels; + const struct color_conversion *composite_color, *svideo_color; + const u32 *filter_table; + int max_srcw; +}; + + +/* + * Sub carrier DDA + * + * I think this works as follows: + * + * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096 + * + * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value + * + * So, + * dda1_ideal = subcarrier/pixel * 4096 + * dda1_inc = floor (dda1_ideal) + * dda2 = dda1_ideal - dda1_inc + * + * then pick a ratio for dda2 that gives the closest approximation. If + * you can't get close enough, you can play with dda3 as well. This + * seems likely to happen when dda2 is small as the jumps would be larger + * + * To invert this, + * + * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size) + * + * The constants below were all computed using a 107.520MHz clock + */ + +/** + * Register programming values for TV modes. + * + * These values account for -1s required. 
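+ *
+ * A rough, hedged cross-check of the subcarrier DDA formula given above,
+ * assuming the full 108 MHz mode clock is what feeds the DDA (the
+ * per-mode comments below quote 107.52 MHz instead): the NTSC-M entry
+ * uses dda1_inc = 135, dda2_inc = 20800, dda2_size = 27456, so
+ *   subcarrier = 108e6 * (135 + 20800/27456) / 4096 ~= 3.5795 MHz,
+ * close to the nominal 3.579545 MHz NTSC colour subcarrier.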
+ */ + +static const struct tv_mode tv_modes[] = { + { + .name = "NTSC-M", + .clock = 108000, + .refresh = 59940, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ + + .hsync_end = 64, .hblank_end = 124, + .hblank_start = 836, .htotal = 857, + + .progressive = false, .trilevel_sync = false, + + .vsync_start_f1 = 6, .vsync_start_f2 = 7, + .vsync_len = 6, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 18, + + .vi_end_f1 = 20, .vi_end_f2 = 21, + .nbr_end = 240, + + .burst_ena = true, + .hburst_start = 72, .hburst_len = 34, + .vburst_start_f1 = 9, .vburst_end_f1 = 240, + .vburst_start_f2 = 10, .vburst_end_f2 = 240, + .vburst_start_f3 = 9, .vburst_end_f3 = 240, + .vburst_start_f4 = 10, .vburst_end_f4 = 240, + + /* desired 3.5800000 actual 3.5800000 clock 107.52 */ + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, + .dda3_inc = 0, .dda3_size = 0, + .sc_reset = TV_SC_RESET_EVERY_4, + .pal_burst = false, + + .composite_levels = &ntsc_m_levels_composite, + .composite_color = &ntsc_m_csc_composite, + .svideo_levels = &ntsc_m_levels_svideo, + .svideo_color = &ntsc_m_csc_svideo, + + .filter_table = filter_table, + }, + { + .name = "NTSC-443", + .clock = 108000, + .refresh = 59940, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ + .hsync_end = 64, .hblank_end = 124, + .hblank_start = 836, .htotal = 857, + + .progressive = false, .trilevel_sync = false, + + .vsync_start_f1 = 6, .vsync_start_f2 = 7, + .vsync_len = 6, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 18, + + .vi_end_f1 = 20, .vi_end_f2 = 21, + .nbr_end = 240, + + .burst_ena = true, + .hburst_start = 72, .hburst_len = 34, + .vburst_start_f1 = 9, .vburst_end_f1 = 240, + .vburst_start_f2 = 10, .vburst_end_f2 = 240, + .vburst_start_f3 = 9, .vburst_end_f3 = 240, + .vburst_start_f4 = 10, .vburst_end_f4 = 240, + + /* desired 4.4336180 actual 4.4336180 clock 107.52 */ + .dda1_inc = 168, + .dda2_inc = 4093, .dda2_size = 27456, + .dda3_inc = 310, .dda3_size = 525, + .sc_reset = TV_SC_RESET_NEVER, + .pal_burst = false, + + .composite_levels = &ntsc_m_levels_composite, + .composite_color = &ntsc_m_csc_composite, + .svideo_levels = &ntsc_m_levels_svideo, + .svideo_color = &ntsc_m_csc_svideo, + + .filter_table = filter_table, + }, + { + .name = "NTSC-J", + .clock = 108000, + .refresh = 59940, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + + /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ + .hsync_end = 64, .hblank_end = 124, + .hblank_start = 836, .htotal = 857, + + .progressive = false, .trilevel_sync = false, + + .vsync_start_f1 = 6, .vsync_start_f2 = 7, + .vsync_len = 6, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 18, + + .vi_end_f1 = 20, .vi_end_f2 = 21, + .nbr_end = 240, + + .burst_ena = true, + .hburst_start = 72, .hburst_len = 34, + .vburst_start_f1 = 9, .vburst_end_f1 = 240, + .vburst_start_f2 = 10, .vburst_end_f2 = 240, + .vburst_start_f3 = 9, .vburst_end_f3 = 240, + .vburst_start_f4 = 10, .vburst_end_f4 = 240, + + /* desired 3.5800000 actual 3.5800000 clock 107.52 */ + .dda1_inc = 135, + .dda2_inc = 20800, .dda2_size = 27456, + .dda3_inc = 0, .dda3_size = 0, + .sc_reset = TV_SC_RESET_EVERY_4, + .pal_burst = false, + + .composite_levels = &ntsc_j_levels_composite, + .composite_color = &ntsc_j_csc_composite, + .svideo_levels = &ntsc_j_levels_svideo, + .svideo_color = &ntsc_j_csc_svideo, 
+ + .filter_table = filter_table, + }, + { + .name = "PAL-M", + .clock = 108000, + .refresh = 59940, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + + /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ + .hsync_end = 64, .hblank_end = 124, + .hblank_start = 836, .htotal = 857, + + .progressive = false, .trilevel_sync = false, + + .vsync_start_f1 = 6, .vsync_start_f2 = 7, + .vsync_len = 6, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 18, + + .vi_end_f1 = 20, .vi_end_f2 = 21, + .nbr_end = 240, + + .burst_ena = true, + .hburst_start = 72, .hburst_len = 34, + .vburst_start_f1 = 9, .vburst_end_f1 = 240, + .vburst_start_f2 = 10, .vburst_end_f2 = 240, + .vburst_start_f3 = 9, .vburst_end_f3 = 240, + .vburst_start_f4 = 10, .vburst_end_f4 = 240, + + /* desired 3.5800000 actual 3.5800000 clock 107.52 */ + .dda1_inc = 135, + .dda2_inc = 16704, .dda2_size = 27456, + .dda3_inc = 0, .dda3_size = 0, + .sc_reset = TV_SC_RESET_EVERY_8, + .pal_burst = true, + + .composite_levels = &pal_m_levels_composite, + .composite_color = &pal_m_csc_composite, + .svideo_levels = &pal_m_levels_svideo, + .svideo_color = &pal_m_csc_svideo, + + .filter_table = filter_table, + }, + { + /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ + .name = "PAL-N", + .clock = 108000, + .refresh = 50000, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + + .hsync_end = 64, .hblank_end = 128, + .hblank_start = 844, .htotal = 863, + + .progressive = false, .trilevel_sync = false, + + + .vsync_start_f1 = 6, .vsync_start_f2 = 7, + .vsync_len = 6, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 18, + + .vi_end_f1 = 24, .vi_end_f2 = 25, + .nbr_end = 286, + + .burst_ena = true, + .hburst_start = 73, .hburst_len = 34, + .vburst_start_f1 = 8, .vburst_end_f1 = 285, + .vburst_start_f2 = 8, .vburst_end_f2 = 286, + .vburst_start_f3 = 9, .vburst_end_f3 = 286, + .vburst_start_f4 = 9, .vburst_end_f4 = 285, + + + /* desired 4.4336180 actual 4.4336180 clock 107.52 */ + .dda1_inc = 135, + .dda2_inc = 23578, .dda2_size = 27648, + .dda3_inc = 134, .dda3_size = 625, + .sc_reset = TV_SC_RESET_EVERY_8, + .pal_burst = true, + + .composite_levels = &pal_n_levels_composite, + .composite_color = &pal_n_csc_composite, + .svideo_levels = &pal_n_levels_svideo, + .svideo_color = &pal_n_csc_svideo, + + .filter_table = filter_table, + }, + { + /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ + .name = "PAL", + .clock = 108000, + .refresh = 50000, + .oversample = TV_OVERSAMPLE_8X, + .component_only = 0, + + .hsync_end = 64, .hblank_end = 142, + .hblank_start = 844, .htotal = 863, + + .progressive = false, .trilevel_sync = false, + + .vsync_start_f1 = 5, .vsync_start_f2 = 6, + .vsync_len = 5, + + .veq_ena = true, .veq_start_f1 = 0, + .veq_start_f2 = 1, .veq_len = 15, + + .vi_end_f1 = 24, .vi_end_f2 = 25, + .nbr_end = 286, + + .burst_ena = true, + .hburst_start = 73, .hburst_len = 32, + .vburst_start_f1 = 8, .vburst_end_f1 = 285, + .vburst_start_f2 = 8, .vburst_end_f2 = 286, + .vburst_start_f3 = 9, .vburst_end_f3 = 286, + .vburst_start_f4 = 9, .vburst_end_f4 = 285, + + /* desired 4.4336180 actual 4.4336180 clock 107.52 */ + .dda1_inc = 168, + .dda2_inc = 4122, .dda2_size = 27648, + .dda3_inc = 67, .dda3_size = 625, + .sc_reset = TV_SC_RESET_EVERY_8, + .pal_burst = true, + + .composite_levels = &pal_levels_composite, + .composite_color = &pal_csc_composite, + .svideo_levels = &pal_levels_svideo, + .svideo_color = &pal_csc_svideo, + + .filter_table = filter_table, 
+ }, + { + .name = "480p", + .clock = 107520, + .refresh = 59940, + .oversample = TV_OVERSAMPLE_4X, + .component_only = 1, + + .hsync_end = 64, .hblank_end = 122, + .hblank_start = 842, .htotal = 857, + + .progressive = true, .trilevel_sync = false, + + .vsync_start_f1 = 12, .vsync_start_f2 = 12, + .vsync_len = 12, + + .veq_ena = false, + + .vi_end_f1 = 44, .vi_end_f2 = 44, + .nbr_end = 479, + + .burst_ena = false, + + .filter_table = filter_table, + }, + { + .name = "576p", + .clock = 107520, + .refresh = 50000, + .oversample = TV_OVERSAMPLE_4X, + .component_only = 1, + + .hsync_end = 64, .hblank_end = 139, + .hblank_start = 859, .htotal = 863, + + .progressive = true, .trilevel_sync = false, + + .vsync_start_f1 = 10, .vsync_start_f2 = 10, + .vsync_len = 10, + + .veq_ena = false, + + .vi_end_f1 = 48, .vi_end_f2 = 48, + .nbr_end = 575, + + .burst_ena = false, + + .filter_table = filter_table, + }, + { + .name = "720p@60Hz", + .clock = 148800, + .refresh = 60000, + .oversample = TV_OVERSAMPLE_2X, + .component_only = 1, + + .hsync_end = 80, .hblank_end = 300, + .hblank_start = 1580, .htotal = 1649, + + .progressive = true, .trilevel_sync = true, + + .vsync_start_f1 = 10, .vsync_start_f2 = 10, + .vsync_len = 10, + + .veq_ena = false, + + .vi_end_f1 = 29, .vi_end_f2 = 29, + .nbr_end = 719, + + .burst_ena = false, + + .filter_table = filter_table, + }, + { + .name = "720p@50Hz", + .clock = 148800, + .refresh = 50000, + .oversample = TV_OVERSAMPLE_2X, + .component_only = 1, + + .hsync_end = 80, .hblank_end = 300, + .hblank_start = 1580, .htotal = 1979, + + .progressive = true, .trilevel_sync = true, + + .vsync_start_f1 = 10, .vsync_start_f2 = 10, + .vsync_len = 10, + + .veq_ena = false, + + .vi_end_f1 = 29, .vi_end_f2 = 29, + .nbr_end = 719, + + .burst_ena = false, + + .filter_table = filter_table, + .max_srcw = 800 + }, + { + .name = "1080i@50Hz", + .clock = 148800, + .refresh = 50000, + .oversample = TV_OVERSAMPLE_2X, + .component_only = 1, + + .hsync_end = 88, .hblank_end = 235, + .hblank_start = 2155, .htotal = 2639, + + .progressive = false, .trilevel_sync = true, + + .vsync_start_f1 = 4, .vsync_start_f2 = 5, + .vsync_len = 10, + + .veq_ena = true, .veq_start_f1 = 4, + .veq_start_f2 = 4, .veq_len = 10, + + + .vi_end_f1 = 21, .vi_end_f2 = 22, + .nbr_end = 539, + + .burst_ena = false, + + .filter_table = filter_table, + }, + { + .name = "1080i@60Hz", + .clock = 148800, + .refresh = 60000, + .oversample = TV_OVERSAMPLE_2X, + .component_only = 1, + + .hsync_end = 88, .hblank_end = 235, + .hblank_start = 2155, .htotal = 2199, + + .progressive = false, .trilevel_sync = true, + + .vsync_start_f1 = 4, .vsync_start_f2 = 5, + .vsync_len = 10, + + .veq_ena = true, .veq_start_f1 = 4, + .veq_start_f2 = 4, .veq_len = 10, + + + .vi_end_f1 = 21, .vi_end_f2 = 22, + .nbr_end = 539, + + .burst_ena = false, + + .filter_table = filter_table, + }, +}; + +static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_tv, base.base); +} + +static struct intel_tv *intel_attached_tv(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_tv, + base); +} + +static bool +intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ(TV_CTL); + + if (!(tmp & TV_ENC_ENABLE)) + return false; + + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void +intel_enable_tv(struct 
intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); +} + +static void +intel_disable_tv(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); +} + +static const struct tv_mode * +intel_tv_mode_lookup(const char *tv_format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) { + const struct tv_mode *tv_mode = &tv_modes[i]; + + if (!strcmp(tv_format, tv_mode->name)) + return tv_mode; + } + return NULL; +} + +static const struct tv_mode * +intel_tv_mode_find(struct intel_tv *intel_tv) +{ + return intel_tv_mode_lookup(intel_tv->tv_format); +} + +static enum drm_mode_status +intel_tv_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_tv *intel_tv = intel_attached_tv(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + + /* Ensure TV refresh is close to desired refresh */ + if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) + < 1000) + return MODE_OK; + + return MODE_CLOCK_RANGE; +} + + +static bool +intel_tv_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + + if (!tv_mode) + return false; + + if (intel_encoder_check_is_cloned(&intel_tv->base)) + return false; + + pipe_config->adjusted_mode.clock = tv_mode->clock; + DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); + pipe_config->pipe_bpp = 8*3; + + /* TV has it's own notion of sync and other mode flags, so clear them. */ + pipe_config->adjusted_mode.flags = 0; + + /* + * FIXME: We don't check whether the input mode is actually what we want + * or whether userspace is doing something stupid. 
+ */ + + return true; +} + +static void +intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + u32 tv_ctl; + u32 hctl1, hctl2, hctl3; + u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; + u32 scctl1, scctl2, scctl3; + int i, j; + const struct video_levels *video_levels; + const struct color_conversion *color_conversion; + bool burst_ena; + int pipe = intel_crtc->pipe; + + if (!tv_mode) + return; /* can't happen (mode_prepare prevents this) */ + + tv_ctl = I915_READ(TV_CTL); + tv_ctl &= TV_CTL_SAVE; + + switch (intel_tv->type) { + default: + case DRM_MODE_CONNECTOR_Unknown: + case DRM_MODE_CONNECTOR_Composite: + tv_ctl |= TV_ENC_OUTPUT_COMPOSITE; + video_levels = tv_mode->composite_levels; + color_conversion = tv_mode->composite_color; + burst_ena = tv_mode->burst_ena; + break; + case DRM_MODE_CONNECTOR_Component: + tv_ctl |= TV_ENC_OUTPUT_COMPONENT; + video_levels = &component_levels; + if (tv_mode->burst_ena) + color_conversion = &sdtv_csc_yprpb; + else + color_conversion = &hdtv_csc_yprpb; + burst_ena = false; + break; + case DRM_MODE_CONNECTOR_SVIDEO: + tv_ctl |= TV_ENC_OUTPUT_SVIDEO; + video_levels = tv_mode->svideo_levels; + color_conversion = tv_mode->svideo_color; + burst_ena = tv_mode->burst_ena; + break; + } + hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) | + (tv_mode->htotal << TV_HTOTAL_SHIFT); + + hctl2 = (tv_mode->hburst_start << 16) | + (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT); + + if (burst_ena) + hctl2 |= TV_BURST_ENA; + + hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) | + (tv_mode->hblank_end << TV_HBLANK_END_SHIFT); + + vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) | + (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) | + (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT); + + vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) | + (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) | + (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT); + + vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) | + (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) | + (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT); + + if (tv_mode->veq_ena) + vctl3 |= TV_EQUAL_ENA; + + vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) | + (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT); + + vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) | + (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT); + + vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) | + (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT); + + vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | + (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); + + if (intel_crtc->pipe == 1) + tv_ctl |= TV_ENC_PIPEB_SELECT; + tv_ctl |= tv_mode->oversample; + + if (tv_mode->progressive) + tv_ctl |= TV_PROGRESSIVE; + if (tv_mode->trilevel_sync) + tv_ctl |= TV_TRILEVEL_SYNC; + if (tv_mode->pal_burst) + tv_ctl |= TV_PAL_BURST; + + scctl1 = 0; + if (tv_mode->dda1_inc) + scctl1 |= TV_SC_DDA1_EN; + if (tv_mode->dda2_inc) + scctl1 |= TV_SC_DDA2_EN; + if (tv_mode->dda3_inc) + scctl1 |= TV_SC_DDA3_EN; + scctl1 |= tv_mode->sc_reset; + if (video_levels) + scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; + scctl1 |= 
tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; + + scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | + tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT; + + scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT | + tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT; + + /* Enable two fixes for the chips that need them. */ + if (dev->pci_device < 0x2772) + tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX; + + I915_WRITE(TV_H_CTL_1, hctl1); + I915_WRITE(TV_H_CTL_2, hctl2); + I915_WRITE(TV_H_CTL_3, hctl3); + I915_WRITE(TV_V_CTL_1, vctl1); + I915_WRITE(TV_V_CTL_2, vctl2); + I915_WRITE(TV_V_CTL_3, vctl3); + I915_WRITE(TV_V_CTL_4, vctl4); + I915_WRITE(TV_V_CTL_5, vctl5); + I915_WRITE(TV_V_CTL_6, vctl6); + I915_WRITE(TV_V_CTL_7, vctl7); + I915_WRITE(TV_SC_CTL_1, scctl1); + I915_WRITE(TV_SC_CTL_2, scctl2); + I915_WRITE(TV_SC_CTL_3, scctl3); + + if (color_conversion) { + I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | + color_conversion->gy); + I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) | + color_conversion->ay); + I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | + color_conversion->gu); + I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) | + color_conversion->au); + I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) | + color_conversion->gv); + I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) | + color_conversion->av); + } + + if (INTEL_INFO(dev)->gen >= 4) + I915_WRITE(TV_CLR_KNOBS, 0x00404000); + else + I915_WRITE(TV_CLR_KNOBS, 0x00606000); + + if (video_levels) + I915_WRITE(TV_CLR_LEVEL, + ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | + (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); + { + int pipeconf_reg = PIPECONF(pipe); + int dspcntr_reg = DSPCNTR(intel_crtc->plane); + int pipeconf = I915_READ(pipeconf_reg); + int dspcntr = I915_READ(dspcntr_reg); + int xpos = 0x0, ypos = 0x0; + unsigned int xsize, ysize; + /* Pipe must be off here */ + I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev_priv, intel_crtc->plane); + + /* Wait for vblank for the disable to take effect */ + if (IS_GEN2(dev)) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); + /* Wait for vblank for the disable to take effect. 
*/ + intel_wait_for_pipe_off(dev, intel_crtc->pipe); + + /* Filter ctl must be set before TV_WIN_SIZE */ + I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); + xsize = tv_mode->hblank_start - tv_mode->hblank_end; + if (tv_mode->progressive) + ysize = tv_mode->nbr_end + 1; + else + ysize = 2*tv_mode->nbr_end + 1; + + xpos += intel_tv->margin[TV_MARGIN_LEFT]; + ypos += intel_tv->margin[TV_MARGIN_TOP]; + xsize -= (intel_tv->margin[TV_MARGIN_LEFT] + + intel_tv->margin[TV_MARGIN_RIGHT]); + ysize -= (intel_tv->margin[TV_MARGIN_TOP] + + intel_tv->margin[TV_MARGIN_BOTTOM]); + I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); + I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); + + I915_WRITE(pipeconf_reg, pipeconf); + I915_WRITE(dspcntr_reg, dspcntr); + intel_flush_display_plane(dev_priv, intel_crtc->plane); + } + + j = 0; + for (i = 0; i < 60; i++) + I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); + for (i = 0; i < 60; i++) + I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); + for (i = 0; i < 43; i++) + I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); + for (i = 0; i < 43; i++) + I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); + I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); + I915_WRITE(TV_CTL, tv_ctl); +} + +static const struct drm_display_mode reported_modes[] = { + { + .name = "NTSC 480i", + .clock = 107520, + .hdisplay = 1280, + .hsync_start = 1368, + .hsync_end = 1496, + .htotal = 1712, + + .vdisplay = 1024, + .vsync_start = 1027, + .vsync_end = 1034, + .vtotal = 1104, + .type = DRM_MODE_TYPE_DRIVER, + }, +}; + +/** + * Detects TV presence by checking for load. + * + * Requires that the current pipe's DPLL is active. + + * \return true if TV is connected. + * \return false if TV is disconnected. + */ +static int +intel_tv_detect_type(struct intel_tv *intel_tv, + struct drm_connector *connector) +{ + struct drm_encoder *encoder = &intel_tv->base.base; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + u32 tv_ctl, save_tv_ctl; + u32 tv_dac, save_tv_dac; + int type; + + /* Disable TV interrupts around load detect or we'll recurse */ + if (connector->polled & DRM_CONNECTOR_POLL_HPD) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | + PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + save_tv_dac = tv_dac = I915_READ(TV_DAC); + save_tv_ctl = tv_ctl = I915_READ(TV_CTL); + + /* Poll for TV detection */ + tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); + tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; + if (intel_crtc->pipe == 1) + tv_ctl |= TV_ENC_PIPEB_SELECT; + else + tv_ctl &= ~TV_ENC_PIPEB_SELECT; + + tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); + tv_dac |= (TVDAC_STATE_CHG_EN | + TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | + TVDAC_C_SENSE_CTL | + DAC_CTL_OVERRIDE | + DAC_A_0_7_V | + DAC_B_0_7_V | + DAC_C_0_7_V); + + + /* + * The TV sense state should be cleared to zero on cantiga platform. Otherwise + * the TV is misdetected. This is hardware requirement. 
+ */ + if (IS_GM45(dev)) + tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); + + I915_WRITE(TV_CTL, tv_ctl); + I915_WRITE(TV_DAC, tv_dac); + POSTING_READ(TV_DAC); + + intel_wait_for_vblank(intel_tv->base.base.dev, + to_intel_crtc(intel_tv->base.base.crtc)->pipe); + + type = -1; + tv_dac = I915_READ(TV_DAC); + DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); + /* + * A B C + * 0 1 1 Composite + * 1 0 X svideo + * 0 0 0 Component + */ + if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { + DRM_DEBUG_KMS("Detected Composite TV connection\n"); + type = DRM_MODE_CONNECTOR_Composite; + } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { + DRM_DEBUG_KMS("Detected S-Video TV connection\n"); + type = DRM_MODE_CONNECTOR_SVIDEO; + } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { + DRM_DEBUG_KMS("Detected Component TV connection\n"); + type = DRM_MODE_CONNECTOR_Component; + } else { + DRM_DEBUG_KMS("Unrecognised TV connection\n"); + type = -1; + } + + I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + I915_WRITE(TV_CTL, save_tv_ctl); + POSTING_READ(TV_CTL); + + /* For unknown reasons the hw barfs if we don't do this vblank wait. */ + intel_wait_for_vblank(intel_tv->base.base.dev, + to_intel_crtc(intel_tv->base.base.crtc)->pipe); + + /* Restore interrupt config */ + if (connector->polled & DRM_CONNECTOR_POLL_HPD) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | + PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + return type; +} + +/* + * Here we set accurate tv format according to connector type + * i.e Component TV should not be assigned by NTSC or PAL + */ +static void intel_tv_find_better_format(struct drm_connector *connector) +{ + struct intel_tv *intel_tv = intel_attached_tv(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + int i; + + if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == + tv_mode->component_only) + return; + + + for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { + tv_mode = tv_modes + i; + + if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == + tv_mode->component_only) + break; + } + + intel_tv->tv_format = tv_mode->name; + drm_object_property_set_value(&connector->base, + connector->dev->mode_config.tv_mode_property, i); +} + +/** + * Detect the TV connection. + * + * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure + * we have a pipe programmed in order to probe the TV. 
+ */ +static enum drm_connector_status +intel_tv_detect(struct drm_connector *connector, bool force) +{ + struct drm_display_mode mode; + struct intel_tv *intel_tv = intel_attached_tv(connector); + int type; + + mode = reported_modes[0]; + + if (force) { + struct intel_load_detect_pipe tmp; + + if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { + type = intel_tv_detect_type(intel_tv, connector); + intel_release_load_detect_pipe(connector, &tmp); + } else + return connector_status_unknown; + } else + return connector->status; + + if (type < 0) + return connector_status_disconnected; + + intel_tv->type = type; + intel_tv_find_better_format(connector); + + return connector_status_connected; +} + +static const struct input_res { + const char *name; + int w, h; +} input_res_table[] = { + {"640x480", 640, 480}, + {"800x600", 800, 600}, + {"1024x768", 1024, 768}, + {"1280x1024", 1280, 1024}, + {"848x480", 848, 480}, + {"1280x720", 1280, 720}, + {"1920x1080", 1920, 1080}, +}; + +/* + * Chose preferred mode according to line number of TV format + */ +static void +intel_tv_chose_preferred_modes(struct drm_connector *connector, + struct drm_display_mode *mode_ptr) +{ + struct intel_tv *intel_tv = intel_attached_tv(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + + if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + else if (tv_mode->nbr_end > 480) { + if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { + if (mode_ptr->vdisplay == 720) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } else if (mode_ptr->vdisplay == 1080) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } +} + +/** + * Stub get_modes function. + * + * This should probably return a set of fixed modes, unless we can figure out + * how to probe modes off of TV connections. 
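+ *
+ * The loop below derives each mode's pixel clock from the TV refresh rate
+ * (kept in millihertz) as clock = refresh * vtotal * htotal / 10^6, in kHz.
+ * A hedged worked example, assuming the NTSC-M format (refresh = 59940):
+ * the 640x480 entry gets htotal = 640 + 96 = 736 and vtotal = 480 + 33 = 513,
+ * giving clock = 59940 * 513 * 736 / 10^6 ~= 22631 kHz, i.e. about 22.6 MHz.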
+ */ + +static int +intel_tv_get_modes(struct drm_connector *connector) +{ + struct drm_display_mode *mode_ptr; + struct intel_tv *intel_tv = intel_attached_tv(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + int j, count = 0; + u64 tmp; + + for (j = 0; j < ARRAY_SIZE(input_res_table); + j++) { + const struct input_res *input = &input_res_table[j]; + unsigned int hactive_s = input->w; + unsigned int vactive_s = input->h; + + if (tv_mode->max_srcw && input->w > tv_mode->max_srcw) + continue; + + if (input->w > 1024 && (!tv_mode->progressive + && !tv_mode->component_only)) + continue; + + mode_ptr = drm_mode_create(connector->dev); + if (!mode_ptr) + continue; + strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); + + mode_ptr->hdisplay = hactive_s; + mode_ptr->hsync_start = hactive_s + 1; + mode_ptr->hsync_end = hactive_s + 64; + if (mode_ptr->hsync_end <= mode_ptr->hsync_start) + mode_ptr->hsync_end = mode_ptr->hsync_start + 1; + mode_ptr->htotal = hactive_s + 96; + + mode_ptr->vdisplay = vactive_s; + mode_ptr->vsync_start = vactive_s + 1; + mode_ptr->vsync_end = vactive_s + 32; + if (mode_ptr->vsync_end <= mode_ptr->vsync_start) + mode_ptr->vsync_end = mode_ptr->vsync_start + 1; + mode_ptr->vtotal = vactive_s + 33; + + tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; + tmp *= mode_ptr->htotal; + tmp = div_u64(tmp, 1000000); + mode_ptr->clock = (int) tmp; + + mode_ptr->type = DRM_MODE_TYPE_DRIVER; + intel_tv_chose_preferred_modes(connector, mode_ptr); + drm_mode_probed_add(connector, mode_ptr); + count++; + } + + return count; +} + +static void +intel_tv_destroy(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + + +static int +intel_tv_set_property(struct drm_connector *connector, struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = connector->dev; + struct intel_tv *intel_tv = intel_attached_tv(connector); + struct drm_crtc *crtc = intel_tv->base.base.crtc; + int ret = 0; + bool changed = false; + + ret = drm_object_property_set_value(&connector->base, property, val); + if (ret < 0) + goto out; + + if (property == dev->mode_config.tv_left_margin_property && + intel_tv->margin[TV_MARGIN_LEFT] != val) { + intel_tv->margin[TV_MARGIN_LEFT] = val; + changed = true; + } else if (property == dev->mode_config.tv_right_margin_property && + intel_tv->margin[TV_MARGIN_RIGHT] != val) { + intel_tv->margin[TV_MARGIN_RIGHT] = val; + changed = true; + } else if (property == dev->mode_config.tv_top_margin_property && + intel_tv->margin[TV_MARGIN_TOP] != val) { + intel_tv->margin[TV_MARGIN_TOP] = val; + changed = true; + } else if (property == dev->mode_config.tv_bottom_margin_property && + intel_tv->margin[TV_MARGIN_BOTTOM] != val) { + intel_tv->margin[TV_MARGIN_BOTTOM] = val; + changed = true; + } else if (property == dev->mode_config.tv_mode_property) { + if (val >= ARRAY_SIZE(tv_modes)) { + ret = -EINVAL; + goto out; + } + if (!strcmp(intel_tv->tv_format, tv_modes[val].name)) + goto out; + + intel_tv->tv_format = tv_modes[val].name; + changed = true; + } else { + ret = -EINVAL; + goto out; + } + + if (changed && crtc) + intel_crtc_restore_mode(crtc); +out: + return ret; +} + +static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { + .mode_set = intel_tv_mode_set, +}; + +static const struct drm_connector_funcs intel_tv_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_tv_detect, + .destroy = intel_tv_destroy, + .set_property 
= intel_tv_set_property, + .fill_modes = drm_helper_probe_single_connector_modes, +}; + +static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { + .mode_valid = intel_tv_mode_valid, + .get_modes = intel_tv_get_modes, + .best_encoder = intel_best_encoder, +}; + +static const struct drm_encoder_funcs intel_tv_enc_funcs = { + .destroy = intel_encoder_destroy, +}; + +/* + * Enumerate the child dev array parsed from VBT to check whether + * the integrated TV is present. + * If it is present, return 1. + * If it is not present, return false. + * If no child dev is parsed from VBT, it assumes that the TV is present. + */ +static int tv_is_present_in_vbt(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct child_device_config *p_child; + int i, ret; + + if (!dev_priv->child_dev_num) + return 1; + + ret = 0; + for (i = 0; i < dev_priv->child_dev_num; i++) { + p_child = dev_priv->child_dev + i; + /* + * If the device type is not TV, continue. + */ + if (p_child->device_type != DEVICE_TYPE_INT_TV && + p_child->device_type != DEVICE_TYPE_TV) + continue; + /* Only when the addin_offset is non-zero, it is regarded + * as present. + */ + if (p_child->addin_offset) { + ret = 1; + break; + } + } + return ret; +} + +void +intel_tv_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_tv *intel_tv; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; + u32 tv_dac_on, tv_dac_off, save_tv_dac; + char *tv_format_names[ARRAY_SIZE(tv_modes)]; + int i, initial_mode = 0; + + if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) + return; + + if (!tv_is_present_in_vbt(dev)) { + DRM_DEBUG_KMS("Integrated TV is not present.\n"); + return; + } + /* Even if we have an encoder we may not have a connector */ + if (!dev_priv->int_tv_support) + return; + + /* + * Sanity check the TV output by checking to see if the + * DAC register holds a value + */ + save_tv_dac = I915_READ(TV_DAC); + + I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); + tv_dac_on = I915_READ(TV_DAC); + + I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + tv_dac_off = I915_READ(TV_DAC); + + I915_WRITE(TV_DAC, save_tv_dac); + + /* + * If the register does not hold the state change enable + * bit, (either as a 0 or a 1), assume it doesn't really + * exist + */ + if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 || + (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) + return; + + intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); + if (!intel_tv) { + return; + } + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_tv); + return; + } + + intel_encoder = &intel_tv->base; + connector = &intel_connector->base; + + /* The documentation, for the older chipsets at least, recommend + * using a polling method rather than hotplug detection for TVs. + * This is because in order to perform the hotplug detection, the PLLs + * for the TV must be kept alive increasing power drain and starving + * bandwidth from other encoders. Notably for instance, it causes + * pipe underruns on Crestline when this encoder is supposedly idle. + * + * More recent chipsets favour HDMI rather than integrated S-Video. 
+ */ + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + drm_connector_init(dev, connector, &intel_tv_connector_funcs, + DRM_MODE_CONNECTOR_SVIDEO); + + drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, + DRM_MODE_ENCODER_TVDAC); + + intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->enable = intel_enable_tv; + intel_encoder->disable = intel_disable_tv; + intel_encoder->get_hw_state = intel_tv_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + + intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_encoder->type = INTEL_OUTPUT_TVOUT; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->cloneable = false; + intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); + intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); + intel_tv->type = DRM_MODE_CONNECTOR_Unknown; + + /* BIOS margin values */ + intel_tv->margin[TV_MARGIN_LEFT] = 54; + intel_tv->margin[TV_MARGIN_TOP] = 36; + intel_tv->margin[TV_MARGIN_RIGHT] = 46; + intel_tv->margin[TV_MARGIN_BOTTOM] = 37; + + intel_tv->tv_format = tv_modes[initial_mode].name; + + drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); + drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + /* Create TV properties then attach current values */ + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) + tv_format_names[i] = (char *)tv_modes[i].name; + drm_mode_create_tv_properties(dev, + ARRAY_SIZE(tv_modes), + tv_format_names); + + drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, + initial_mode); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_left_margin_property, + intel_tv->margin[TV_MARGIN_LEFT]); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_top_margin_property, + intel_tv->margin[TV_MARGIN_TOP]); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_right_margin_property, + intel_tv->margin[TV_MARGIN_RIGHT]); + drm_object_attach_property(&connector->base, + dev->mode_config.tv_bottom_margin_property, + intel_tv->margin[TV_MARGIN_BOTTOM]); + drm_sysfs_connector_add(connector); +} -- cgit v1.2.3