Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r-- drivers/gpu/drm/radeon/.gitignore | 3
-rw-r--r-- drivers/gpu/drm/radeon/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 87
-rw-r--r-- drivers/gpu/drm/radeon/ObjectID.h | 696
-rw-r--r-- drivers/gpu/drm/radeon/atom-bits.h | 48
-rw-r--r-- drivers/gpu/drm/radeon/atom-names.h | 100
-rw-r--r-- drivers/gpu/drm/radeon/atom-types.h | 42
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 1416
-rw-r--r-- drivers/gpu/drm/radeon/atom.h | 159
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 8012
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 1929
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 925
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 2673
-rw-r--r-- drivers/gpu/drm/radeon/atombios_i2c.c | 141
-rw-r--r-- drivers/gpu/drm/radeon/avivod.h | 62
-rw-r--r-- drivers/gpu/drm/radeon/cayman_blit_shaders.c | 374
-rw-r--r-- drivers/gpu/drm/radeon/cayman_blit_shaders.h | 35
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 5082
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 729
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_blit_shaders.c | 357
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_blit_shaders.h | 35
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c | 3514
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 299
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 240
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 2100
-rw-r--r-- drivers/gpu/drm/radeon/mkregtable.c | 725
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 2479
-rw-r--r-- drivers/gpu/drm/radeon/ni_reg.h | 86
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 703
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 4113
-rw-r--r-- drivers/gpu/drm/radeon/r100_track.h | 97
-rw-r--r-- drivers/gpu/drm/radeon/r100d.h | 869
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 548
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 1558
-rw-r--r-- drivers/gpu/drm/radeon/r300_cmdbuf.c | 1186
-rw-r--r-- drivers/gpu/drm/radeon/r300_reg.h | 1789
-rw-r--r-- drivers/gpu/drm/radeon/r300d.h | 343
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 491
-rw-r--r-- drivers/gpu/drm/radeon/r420d.h | 249
-rw-r--r-- drivers/gpu/drm/radeon/r500_reg.h | 800
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 329
-rw-r--r-- drivers/gpu/drm/radeon/r520d.h | 187
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 4833
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 193
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit.c | 843
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 785
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_shaders.c | 719
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_shaders.h | 39
-rw-r--r-- drivers/gpu/drm/radeon/r600_cp.c | 2660
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 2621
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 534
-rw-r--r-- drivers/gpu/drm/radeon/r600_reg.h | 174
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 2002
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 2079
-rw-r--r-- drivers/gpu/drm/radeon/radeon_acpi.c | 649
-rw-r--r-- drivers/gpu/drm/radeon/radeon_acpi.h | 445
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 285
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 2097
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 556
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 3315
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 560
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 250
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 696
-rw-r--r-- drivers/gpu/drm/radeon/radeon_blit_common.h | 44
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 912
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 3619
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 2051
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 2243
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 811
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cursor.c | 316
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 1549
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 1666
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 505
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.h | 2164
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 384
-rw-r--r-- drivers/gpu/drm/radeon/radeon_family.h | 116
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 405
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 949
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 1287
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 627
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 1188
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ioc32.c | 424
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq.c | 402
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 480
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 762
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1077
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1810
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_tv.c | 923
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mem.c | 302
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 761
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 642
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 181
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 874
-rw-r--r-- drivers/gpu/drm/radeon/radeon_prime.c | 98
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 3724
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 896
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sa.c | 420
-rw-r--r-- drivers/gpu/drm/radeon/radeon_semaphore.c | 119
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 3261
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 533
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace.h | 82
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace_points.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 899
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 869
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/cayman | 642
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/evergreen | 644
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r100 | 105
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r200 | 186
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r300 | 714
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r420 | 780
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r600 | 755
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/rn50 | 30
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/rs600 | 780
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/rv515 | 496
-rw-r--r-- drivers/gpu/drm/radeon/rs100d.h | 40
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 569
-rw-r--r-- drivers/gpu/drm/radeon/rs400d.h | 160
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 1078
-rw-r--r-- drivers/gpu/drm/radeon/rs600d.h | 685
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 807
-rw-r--r-- drivers/gpu/drm/radeon/rs690d.h | 313
-rw-r--r-- drivers/gpu/drm/radeon/rv200d.h | 36
-rw-r--r-- drivers/gpu/drm/radeon/rv250d.h | 123
-rw-r--r-- drivers/gpu/drm/radeon/rv350d.h | 52
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 1257
-rw-r--r-- drivers/gpu/drm/radeon/rv515d.h | 638
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 2194
-rw-r--r-- drivers/gpu/drm/radeon/rv770d.h | 717
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 5771
-rw-r--r-- drivers/gpu/drm/radeon/si_blit_shaders.c | 253
-rw-r--r-- drivers/gpu/drm/radeon/si_blit_shaders.h | 32
-rw-r--r-- drivers/gpu/drm/radeon/si_reg.h | 105
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 1115
133 files changed, 133414 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 0000000..403eb3a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
+mkregtable
+*_reg_safe.h
+
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 0000000..970f8e9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,8 @@
+config DRM_RADEON_UMS
+ bool "Enable userspace modesetting on radeon (DEPRECATED)"
+ depends on DRM_RADEON
+ help
+ Choose this option if you still need userspace modesetting.
+
+ Userspace modesetting has been deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 0000000..c338cea
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,87 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+
+quiet_cmd_mkregtable = MKREGTABLE $@
+ cmd_mkregtable = $(obj)/mkregtable $< > $@
+
+$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
+$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
+
+$(obj)/r200.o: $(obj)/r200_reg_safe.h
+
+$(obj)/rv515.o: $(obj)/rv515_reg_safe.h
+
+$(obj)/r300.o: $(obj)/r300_reg_safe.h
+
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
+$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
+
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CPTCFG_DRM_RADEON_UMS) += radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
+# add KMS driver
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
+ radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+ atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+ radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+ radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+ radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+ si_blit_shaders.o radeon_prime.o radeon_uvd.o
+
+radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+radeon-$(CONFIG_ACPI) += radeon_acpi.o
+
+obj-$(CPTCFG_DRM_RADEON) += radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
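The Makefile above wires a small code-generation step into the build: mkregtable is compiled as a host program, each plain-text table under reg_srcs/ is translated into a generated <asic>_reg_safe.h header, and the command-stream checkers (r100.o, r600_cs.o, evergreen_cs.o, ...) declare dependencies on those headers. A hedged sketch of how such a generated register bitmap could be consumed follows; the array name and its contents are illustrative assumptions, not the real generated output:

	#include <stdbool.h>
	#include <stdint.h>

	/* stand-in for a generated <asic>_reg_safe.h: one bit per register dword */
	static const uint32_t demo_reg_safe_bm[4] = { 0xFFFFFFFF, 0x0, 0xFFFF0000, 0x1 };

	static bool reg_is_safe(uint32_t reg_offset)
	{
		uint32_t idx = reg_offset >> 2;	/* byte offset -> dword index */

		if (idx / 32 >= sizeof(demo_reg_safe_bm) / sizeof(demo_reg_safe_bm[0]))
			return false;		/* beyond the table: reject */
		return demo_reg_safe_bm[idx / 32] & (1u << (idx % 32));
	}

Keeping the allow-lists as plain text in reg_srcs/ and regenerating the headers at build time means adding a register to the safe set never requires hand-editing C tables.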
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 0000000..ca4b038
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,696 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE 0x0
+#define GRAPH_OBJECT_TYPE_GPU 0x1
+#define GRAPH_OBJECT_TYPE_ENCODER 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
+#define GRAPH_OBJECT_TYPE_ROUTER 0x4
+/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
+#define GRAPH_OBJECT_TYPE_GENERIC 0x7
+
+/****************************************************/
+/* Encoder Object ID Definition */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE 0x00
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B 0x08
+#define ENCODER_OBJECT_ID_CH7303 0x09
+#define ENCODER_OBJECT_ID_CH7301 0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
+#define ENCODER_OBJECT_ID_TITFP513 0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623 0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
+#define ENCODER_OBJECT_ID_ALMOND 0x22
+#define ENCODER_OBJECT_ID_TRAVIS 0x23
+#define ENCODER_OBJECT_ID_NUTMEG 0x22
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178 0x17 /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
+#define ENCODER_OBJECT_ID_VT1625 0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801 0x1C
+#define ENCODER_OBJECT_ID_DP_DP501 0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
+#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
+
+/****************************************************/
+/* Connector Object ID Definition */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE 0x00
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04
+#define CONNECTOR_OBJECT_ID_VGA 0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
+#define CONNECTOR_OBJECT_ID_YPbPr 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
+#define CONNECTOR_OBJECT_ID_LVDS 0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
+#define CONNECTOR_OBJECT_ID_eDP 0x14
+#define CONNECTOR_OBJECT_ID_MXM 0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE 0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
+
+/****************************************************/
+/* Generic Object ID Definition */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE 0x00
+#define GENERIC_OBJECT_ID_GLSYNC 0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
+#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 /* may appear in the Misc Object table; follows the ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin */
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1 0x01
+#define GRAPH_OBJECT_ENUM_ID2 0x02
+#define GRAPH_OBJECT_ENUM_ID3 0x03
+#define GRAPH_OBJECT_ENUM_ID4 0x04
+#define GRAPH_OBJECT_ENUM_ID5 0x05
+#define GRAPH_OBJECT_ENUM_ID6 0x06
+#define GRAPH_OBJECT_ENUM_ID7 0x07
+
+/****************************************************/
+/* Graphics Object ID Bit definition */
+/****************************************************/
+#define OBJECT_ID_MASK 0x00FF
+#define ENUM_ID_MASK 0x0700
+#define RESERVED1_ID_MASK 0x0800
+#define OBJECT_TYPE_MASK 0x7000
+#define RESERVED2_ID_MASK 0x8000
+
+#define OBJECT_ID_SHIFT 0x00
+#define ENUM_ID_SHIFT 0x08
+#define OBJECT_TYPE_SHIFT 0x0C
+
+
+/****************************************************/
+/* Graphics Object family definition */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
+#define ENCODER_SIL170B_ENUM_ID1 0x2108
+#define ENCODER_CH7303_ENUM_ID1 0x2109
+#define ENCODER_CH7301_ENUM_ID1 0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D
+#define ENCODER_TITFP513_ENUM_ID1 0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F
+#define ENCODER_VT1623_ENUM_ID1 0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
+#define ENCODER_SI178_ENUM_ID1 0x2117
+#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
+#define ENCODER_VT1625_ENUM_ID1 0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C
+#define ENCODER_DP_DP501_ENUM_ID1 0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
+#define ENCODER_VCE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104
+#define CONNECTOR_VGA_ENUM_ID1 0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1 0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1 0x3108
+#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A
+#define CONNECTOR_SCART_ENUM_ID1 0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D
+#define CONNECTOR_LVDS_ENUM_ID1 0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
+
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif /* _OBJECTID_H */
+
+
+
+
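ObjectID.h packs each graphics object ID into a 16-bit word: the object ID sits in bits 7:0, the enum (instance) ID in bits 10:8, and the object type in bits 14:12, per the mask and shift definitions above. A minimal standalone sketch of decoding one of these IDs:

	#include <stdint.h>
	#include <stdio.h>

	#define OBJECT_ID_MASK    0x00FF
	#define ENUM_ID_MASK      0x0700
	#define OBJECT_TYPE_MASK  0x7000
	#define OBJECT_ID_SHIFT   0x00
	#define ENUM_ID_SHIFT     0x08
	#define OBJECT_TYPE_SHIFT 0x0C

	int main(void)
	{
		uint16_t obj = 0x211E;	/* ENCODER_INTERNAL_UNIPHY_ENUM_ID1 */

		/* prints: type=2 enum=1 id=0x1E (encoder, instance 1, UNIPHY) */
		printf("type=%u enum=%u id=0x%02X\n",
		       (unsigned)((obj & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT),
		       (unsigned)((obj & ENUM_ID_MASK) >> ENUM_ID_SHIFT),
		       (unsigned)((obj & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT));
		return 0;
	}

The ENCODER_*/CONNECTOR_* enum macros later in the file are exactly this packing written out as compile-time constants.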
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 0000000..e8fae5c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+ return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+ return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+ return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif
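Because get_u16() and get_u32() are composed from single-byte reads, BIOS tables are always decoded as little-endian, on any host and without unaligned loads. A standalone demonstration of the accessors:

	#include <stdint.h>
	#include <stdio.h>

	static inline uint8_t get_u8(void *bios, int ptr)
	{
		return ((unsigned char *)bios)[ptr];
	}

	static inline uint16_t get_u16(void *bios, int ptr)
	{
		return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
	}

	static inline uint32_t get_u32(void *bios, int ptr)
	{
		return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
	}

	int main(void)
	{
		unsigned char bios[] = { 0x78, 0x56, 0x34, 0x12 };

		/* prints 0x12345678 on little- and big-endian hosts alike */
		printf("0x%08X\n", get_u32(bios, 0));
		return 0;
	}

The paired macros differ only in where the BIOS pointer comes from: CU8/CU16/CU32 read through a struct atom_context directly, while U8/U16/U32 go through an execution context's ctx->ctx.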
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 0000000..6f907a5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
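These tables exist only for the ATOM_DEBUG trace path, and every lookup has to be bounds-checked against the matching *_CNT constant (all of which collapse to 0 when ATOM_DEBUG is off). A hedged sketch of that lookup pattern, with a tiny stand-in table so the snippet is self-contained:

	/* bounds-checked name lookup, mirroring how ATOM_OP_NAMES_CNT
	 * is meant to guard atom_op_names[] above */
	static const char *demo_names[] = { "RESERVED", "MOVE_REG", "MOVE_PS" };
	#define DEMO_NAMES_CNT (sizeof(demo_names) / sizeof(demo_names[0]))

	static const char *demo_name(unsigned int op)
	{
		return (op < DEMO_NAMES_CNT) ? demo_names[op] : "UNKNOWN";
	}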
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 0000000..1125b86
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 0000000..15da7ef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+#include "radeon.h"
+
+#define ATOM_COND_ABOVE 0
+#define ATOM_COND_ABOVEOREQUAL 1
+#define ATOM_COND_ALWAYS 2
+#define ATOM_COND_BELOW 3
+#define ATOM_COND_BELOWOREQUAL 4
+#define ATOM_COND_EQUAL 5
+#define ATOM_COND_NOTEQUAL 6
+
+#define ATOM_PORT_ATI 0
+#define ATOM_PORT_PCI 1
+#define ATOM_PORT_SYSIO 2
+
+#define ATOM_UNIT_MICROSEC 0
+#define ATOM_UNIT_MILLISEC 1
+
+#define PLL_INDEX 2
+#define PLL_DATA 3
+
+typedef struct {
+ struct atom_context *ctx;
+ uint32_t *ps, *ws;
+ int ps_shift;
+ uint16_t start;
+ unsigned last_jump;
+ unsigned long last_jump_jiffies;
+ bool abort;
+} atom_exec_context;
+
+int atom_debug = 0;
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+
+static uint32_t atom_arg_mask[8] = {
+ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 0xFF000000
+};
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+ /* translate destination alignment field to the source alignment encoding */
+ {0, 0, 0, 0},
+ {1, 2, 3, 0},
+ {1, 2, 3, 0},
+ {1, 2, 3, 0},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+ {4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+ while (n--)
+ printk(" ");
+}
+
+#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+ uint32_t index, uint32_t data)
+{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
+ uint32_t temp = 0xCDCDCDCD;
+
+ while (1)
+ switch (CU8(base)) {
+ case ATOM_IIO_NOP:
+ base++;
+ break;
+ case ATOM_IIO_READ:
+ temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ base += 3;
+ break;
+ case ATOM_IIO_WRITE:
+ if (rdev->family == CHIP_RV515)
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+ base += 3;
+ break;
+ case ATOM_IIO_CLEAR:
+ temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+ base += 3;
+ break;
+ case ATOM_IIO_SET:
+ temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
+ base += 3;
+ break;
+ case ATOM_IIO_MOVE_INDEX:
+ temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+ temp |= ((index >> CU8(base + 2)) &
+ (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+ base += 4;
+ break;
+ case ATOM_IIO_MOVE_DATA:
+ temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+ temp |= ((data >> CU8(base + 2)) &
+ (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+ base += 4;
+ break;
+ case ATOM_IIO_MOVE_ATTR:
+ temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+ temp |= ((ctx->io_attr >> CU8(base + 2)) &
+ (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+ base += 4;
+ break;
+ case ATOM_IIO_END:
+ return temp;
+ default:
+ printk(KERN_INFO "Unknown IIO opcode.\n");
+ return 0;
+ }
+}
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ int *ptr, uint32_t *saved, int print)
+{
+ uint32_t idx, val = 0xCDCDCDCD, align, arg;
+ struct atom_context *gctx = ctx->ctx;
+ arg = attr & 7;
+ align = (attr >> 3) & 7;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ if (print)
+ DEBUG("REG[0x%04X]", idx);
+ idx += gctx->reg_block;
+ switch (gctx->io_mode) {
+ case ATOM_IO_MM:
+ val = gctx->card->reg_read(gctx->card, idx);
+ break;
+ case ATOM_IO_PCI:
+ printk(KERN_INFO
+ "PCI registers are not implemented.\n");
+ return 0;
+ case ATOM_IO_SYSIO:
+ printk(KERN_INFO
+ "SYSIO registers are not implemented.\n");
+ return 0;
+ default:
+ if (!(gctx->io_mode & 0x80)) {
+ printk(KERN_INFO "Bad IO mode.\n");
+ return 0;
+ }
+ if (!gctx->iio[gctx->io_mode & 0x7F]) {
+ printk(KERN_INFO
+ "Undefined indirect IO read method %d.\n",
+ gctx->io_mode & 0x7F);
+ return 0;
+ }
+ val = atom_iio_execute(gctx,
+ gctx->iio[gctx->io_mode & 0x7F], idx, 0);
+ }
+ break;
+ case ATOM_ARG_PS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ /* get_unaligned_le32 avoids unaligned accesses from atombios
+ * tables, noticed on a DEC Alpha. */
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (print)
+ DEBUG("PS[0x%02X,0x%04X]", idx, val);
+ break;
+ case ATOM_ARG_WS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("WS[0x%02X]", idx);
+ switch (idx) {
+ case ATOM_WS_QUOTIENT:
+ val = gctx->divmul[0];
+ break;
+ case ATOM_WS_REMAINDER:
+ val = gctx->divmul[1];
+ break;
+ case ATOM_WS_DATAPTR:
+ val = gctx->data_block;
+ break;
+ case ATOM_WS_SHIFT:
+ val = gctx->shift;
+ break;
+ case ATOM_WS_OR_MASK:
+ val = 1 << gctx->shift;
+ break;
+ case ATOM_WS_AND_MASK:
+ val = ~(1 << gctx->shift);
+ break;
+ case ATOM_WS_FB_WINDOW:
+ val = gctx->fb_base;
+ break;
+ case ATOM_WS_ATTRIBUTES:
+ val = gctx->io_attr;
+ break;
+ case ATOM_WS_REGPTR:
+ val = gctx->reg_block;
+ break;
+ default:
+ val = ctx->ws[idx];
+ }
+ break;
+ case ATOM_ARG_ID:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ if (print) {
+ if (gctx->data_block)
+ DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+ else
+ DEBUG("ID[0x%04X]", idx);
+ }
+ val = U32(idx + gctx->data_block);
+ break;
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ val = 0;
+ } else
+ val = gctx->scratch[(gctx->fb_base / 4) + idx];
+ if (print)
+ DEBUG("FB[0x%02X]", idx);
+ break;
+ case ATOM_ARG_IMM:
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ if (print)
+ DEBUG("IMM 0x%08X\n", val);
+ return val;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ if (print)
+ DEBUG("IMM 0x%04X\n", val);
+ return val;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("IMM 0x%02X\n", val);
+ return val;
+ }
+ return 0;
+ case ATOM_ARG_PLL:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("PLL[0x%02X]", idx);
+ val = gctx->card->pll_read(gctx->card, idx);
+ break;
+ case ATOM_ARG_MC:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if (print)
+ DEBUG("MC[0x%02X]", idx);
+ val = gctx->card->mc_read(gctx->card, idx);
+ break;
+ }
+ if (saved)
+ *saved = val;
+ val &= atom_arg_mask[align];
+ val >>= atom_arg_shift[align];
+ if (print)
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ DEBUG(".[31:0] -> 0x%08X\n", val);
+ break;
+ case ATOM_SRC_WORD0:
+ DEBUG(".[15:0] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_WORD8:
+ DEBUG(".[23:8] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_WORD16:
+ DEBUG(".[31:16] -> 0x%04X\n", val);
+ break;
+ case ATOM_SRC_BYTE0:
+ DEBUG(".[7:0] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE8:
+ DEBUG(".[15:8] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE16:
+ DEBUG(".[23:16] -> 0x%02X\n", val);
+ break;
+ case ATOM_SRC_BYTE24:
+ DEBUG(".[31:24] -> 0x%02X\n", val);
+ break;
+ }
+ return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+ uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ case ATOM_ARG_ID:
+ (*ptr) += 2;
+ break;
+ case ATOM_ARG_PLL:
+ case ATOM_ARG_MC:
+ case ATOM_ARG_PS:
+ case ATOM_ARG_WS:
+ case ATOM_ARG_FB:
+ (*ptr)++;
+ break;
+ case ATOM_ARG_IMM:
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ (*ptr) += 4;
+ return;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ (*ptr) += 2;
+ return;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ (*ptr)++;
+ return;
+ }
+ return;
+ }
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+ return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+ uint32_t val = 0xCDCDCDCD;
+
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ break;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ break;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ break;
+ }
+ return val;
+}
+
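+/*
+ * Destination operands reuse the source decoder: bits [5:3] of the
+ * attribute byte select the destination size class and bits [7:6] a
+ * position within it, which atom_dst_to_src[][] maps back onto an
+ * ATOM_SRC_* alignment code.
+ */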
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ int *ptr, uint32_t *saved, int print)
+{
+ return atom_get_src_int(ctx,
+ arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+ ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+ atom_skip_src_int(ctx,
+ arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+ ptr);
+}
+
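+/*
+ * Destination writes are read-modify-write: 'saved' carries the
+ * original full dword (captured by atom_get_dst()), the new field is
+ * shifted and masked into place, and the merged value is written back.
+ */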
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ int *ptr, uint32_t val, uint32_t saved)
+{
+ uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+ uint32_t old_val = val, idx;
+ struct atom_context *gctx = ctx->ctx;
+ old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+ val <<= atom_arg_shift[align];
+ val &= atom_arg_mask[align];
+ saved &= ~atom_arg_mask[align];
+ val |= saved;
+ switch (arg) {
+ case ATOM_ARG_REG:
+ idx = U16(*ptr);
+ (*ptr) += 2;
+ DEBUG("REG[0x%04X]", idx);
+ idx += gctx->reg_block;
+ switch (gctx->io_mode) {
+ case ATOM_IO_MM:
+ if (idx == 0)
+ gctx->card->reg_write(gctx->card, idx,
+ val << 2);
+ else
+ gctx->card->reg_write(gctx->card, idx, val);
+ break;
+ case ATOM_IO_PCI:
+ printk(KERN_INFO
+ "PCI registers are not implemented.\n");
+ return;
+ case ATOM_IO_SYSIO:
+ printk(KERN_INFO
+ "SYSIO registers are not implemented.\n");
+ return;
+ default:
+ if (!(gctx->io_mode & 0x80)) {
+ printk(KERN_INFO "Bad IO mode.\n");
+ return;
+ }
+ if (!gctx->iio[gctx->io_mode & 0x7F]) {
+ printk(KERN_INFO
+ "Undefined indirect IO write method %d.\n",
+ gctx->io_mode & 0x7F);
+ return;
+ }
+ atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
+ idx, val);
+ }
+ break;
+ case ATOM_ARG_PS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("PS[0x%02X]", idx);
+ ctx->ps[idx] = cpu_to_le32(val);
+ break;
+ case ATOM_ARG_WS:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("WS[0x%02X]", idx);
+ switch (idx) {
+ case ATOM_WS_QUOTIENT:
+ gctx->divmul[0] = val;
+ break;
+ case ATOM_WS_REMAINDER:
+ gctx->divmul[1] = val;
+ break;
+ case ATOM_WS_DATAPTR:
+ gctx->data_block = val;
+ break;
+ case ATOM_WS_SHIFT:
+ gctx->shift = val;
+ break;
+ case ATOM_WS_OR_MASK:
+ case ATOM_WS_AND_MASK:
+ break;
+ case ATOM_WS_FB_WINDOW:
+ gctx->fb_base = val;
+ break;
+ case ATOM_WS_ATTRIBUTES:
+ gctx->io_attr = val;
+ break;
+ case ATOM_WS_REGPTR:
+ gctx->reg_block = val;
+ break;
+ default:
+ ctx->ws[idx] = val;
+ }
+ break;
+ case ATOM_ARG_FB:
+ idx = U8(*ptr);
+ (*ptr)++;
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ } else
+ gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+ DEBUG("FB[0x%02X]", idx);
+ break;
+ case ATOM_ARG_PLL:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("PLL[0x%02X]", idx);
+ gctx->card->pll_write(gctx->card, idx, val);
+ break;
+ case ATOM_ARG_MC:
+ idx = U8(*ptr);
+ (*ptr)++;
+ DEBUG("MC[0x%02X]", idx);
+ gctx->card->mc_write(gctx->card, idx, val);
+ return;
+ }
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ DEBUG(".[31:0] <- 0x%08X\n", old_val);
+ break;
+ case ATOM_SRC_WORD0:
+ DEBUG(".[15:0] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_WORD8:
+ DEBUG(".[23:8] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_WORD16:
+ DEBUG(".[31:16] <- 0x%04X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE0:
+ DEBUG(".[7:0] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE8:
+ DEBUG(".[15:8] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE16:
+ DEBUG(".[23:16] <- 0x%02X\n", old_val);
+ break;
+ case ATOM_SRC_BYTE24:
+ DEBUG(".[31:24] <- 0x%02X\n", old_val);
+ break;
+ }
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst += src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int idx = U8((*ptr)++);
+ int r = 0;
+
+ if (idx < ATOM_TABLE_NAMES_CNT)
+ SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
+ else
+ SDEBUG(" table: %d\n", idx);
+ if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ if (r) {
+ ctx->abort = true;
+ }
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t saved;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->cs_equal = (dst == src);
+ ctx->ctx->cs_above = (dst > src);
+ SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+ ctx->ctx->cs_above ? "GT" : "LE");
+}
+
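+/*
+ * Delays honour the calling context: microsecond delays always spin,
+ * while millisecond delays sleep unless sleeping is not allowed
+ * (drm_can_sleep() returns false), in which case mdelay() busy-waits.
+ */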
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+ unsigned count = U8((*ptr)++);
+ SDEBUG(" count: %d\n", count);
+ if (arg == ATOM_UNIT_MICROSEC)
+ udelay(count);
+ else if (!drm_can_sleep())
+ mdelay(count);
+ else
+ msleep(count);
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ if (src != 0) {
+ ctx->ctx->divmul[0] = dst / src;
+ ctx->ctx->divmul[1] = dst % src;
+ } else {
+ ctx->ctx->divmul[0] = 0;
+ ctx->ctx->divmul[1] = 0;
+ }
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+ /* functionally, a nop */
+}
+
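+/*
+ * Jumps carry a watchdog: if execution keeps branching back to the
+ * same target for more than five seconds of wall time, the table is
+ * assumed to be stuck in an infinite loop and execution is aborted.
+ */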
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int execute = 0, target = U16(*ptr);
+ unsigned long cjiffies;
+
+ (*ptr) += 2;
+ switch (arg) {
+ case ATOM_COND_ABOVE:
+ execute = ctx->ctx->cs_above;
+ break;
+ case ATOM_COND_ABOVEOREQUAL:
+ execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+ break;
+ case ATOM_COND_ALWAYS:
+ execute = 1;
+ break;
+ case ATOM_COND_BELOW:
+ execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+ break;
+ case ATOM_COND_BELOWOREQUAL:
+ execute = !ctx->ctx->cs_above;
+ break;
+ case ATOM_COND_EQUAL:
+ execute = ctx->ctx->cs_equal;
+ break;
+ case ATOM_COND_NOTEQUAL:
+ execute = !ctx->ctx->cs_equal;
+ break;
+ }
+ if (arg != ATOM_COND_ALWAYS)
+ SDEBUG(" taken: %s\n", execute ? "yes" : "no");
+ SDEBUG(" target: 0x%04X\n", target);
+ if (execute) {
+ if (ctx->last_jump == (ctx->start + target)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
+ if (jiffies_to_msecs(cjiffies) > 5000) {
+ DRM_ERROR("atombios stuck in loop for more than 5 seconds, aborting\n");
+ ctx->abort = true;
+ }
+ } else {
+ /* jiffies wrapped around; just wait a little longer */
+ ctx->last_jump_jiffies = jiffies;
+ }
+ } else {
+ ctx->last_jump = ctx->start + target;
+ ctx->last_jump_jiffies = jiffies;
+ }
+ *ptr = ctx->start + target;
+ }
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, mask, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+ SDEBUG(" mask: 0x%08x", mask);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= mask;
+ dst |= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t src, saved;
+ int dptr = *ptr;
+ if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+ atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+ else {
+ atom_skip_dst(ctx, arg, attr, ptr);
+ saved = 0xCDCDCDCD;
+ }
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+ /* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst |= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t val = U8((*ptr)++);
+ SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int idx = U8(*ptr);
+ (*ptr)++;
+ SDEBUG(" block: %d\n", idx);
+ if (!idx)
+ ctx->ctx->data_block = 0;
+ else if (idx == 255)
+ ctx->ctx->data_block = ctx->start;
+ else
+ ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+ SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ SDEBUG(" fb_base: ");
+ ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+ int port;
+ switch (arg) {
+ case ATOM_PORT_ATI:
+ port = U16(*ptr);
+ if (port < ATOM_IO_NAMES_CNT)
+ SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
+ else
+ SDEBUG(" port: %d\n", port);
+ if (!port)
+ ctx->ctx->io_mode = ATOM_IO_MM;
+ else
+ ctx->ctx->io_mode = ATOM_IO_IIO | port;
+ (*ptr) += 2;
+ break;
+ case ATOM_PORT_PCI:
+ ctx->ctx->io_mode = ATOM_IO_PCI;
+ (*ptr)++;
+ break;
+ case ATOM_PORT_SYSIO:
+ ctx->ctx->io_mode = ATOM_IO_SYSIO;
+ (*ptr)++;
+ break;
+ }
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+ ctx->ctx->reg_block = U16(*ptr);
+ (*ptr) += 2;
+ SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* the op needs the full dst value */
+ dst = saved;
+ shift = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ /* the op needs the full dst value */
+ dst = saved;
+ shift = atom_get_src(ctx, attr, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ dst &= atom_arg_mask[dst_align];
+ dst >>= atom_arg_shift[dst_align];
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst -= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
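+/*
+ * Switch bytecode layout, as decoded below: each case starts with the
+ * magic byte ATOM_CASE_MAGIC, followed by an immediate case value and
+ * a 16-bit jump target; the case list is terminated by the 16-bit
+ * marker ATOM_CASE_END.
+ */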
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t src, val, target;
+ SDEBUG(" switch: ");
+ src = atom_get_src(ctx, attr, ptr);
+ while (U16(*ptr) != ATOM_CASE_END)
+ if (U8(*ptr) == ATOM_CASE_MAGIC) {
+ (*ptr)++;
+ SDEBUG(" case: ");
+ val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
+ target = U16(*ptr);
+ if (val == src) {
+ SDEBUG(" target: %04X\n", target);
+ *ptr = ctx->start + target;
+ return;
+ }
+ (*ptr) += 2;
+ } else {
+ printk(KERN_INFO "Bad case.\n");
+ return;
+ }
+ (*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src;
+ SDEBUG(" src1: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+ SDEBUG(" src2: ");
+ src = atom_get_src(ctx, attr, ptr);
+ ctx->ctx->cs_equal = ((dst & src) == 0);
+ SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++);
+ uint32_t dst, src, saved;
+ int dptr = *ptr;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst ^= src;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+ printk(KERN_INFO "unimplemented!\n");
+}
+
+static struct {
+ void (*func)(atom_exec_context *, int *, int);
+ int arg;
+} opcode_table[ATOM_OP_CNT] = {
+ { NULL, 0 },
+ { atom_op_move, ATOM_ARG_REG },
+ { atom_op_move, ATOM_ARG_PS },
+ { atom_op_move, ATOM_ARG_WS },
+ { atom_op_move, ATOM_ARG_FB },
+ { atom_op_move, ATOM_ARG_PLL },
+ { atom_op_move, ATOM_ARG_MC },
+ { atom_op_and, ATOM_ARG_REG },
+ { atom_op_and, ATOM_ARG_PS },
+ { atom_op_and, ATOM_ARG_WS },
+ { atom_op_and, ATOM_ARG_FB },
+ { atom_op_and, ATOM_ARG_PLL },
+ { atom_op_and, ATOM_ARG_MC },
+ { atom_op_or, ATOM_ARG_REG },
+ { atom_op_or, ATOM_ARG_PS },
+ { atom_op_or, ATOM_ARG_WS },
+ { atom_op_or, ATOM_ARG_FB },
+ { atom_op_or, ATOM_ARG_PLL },
+ { atom_op_or, ATOM_ARG_MC },
+ { atom_op_shift_left, ATOM_ARG_REG },
+ { atom_op_shift_left, ATOM_ARG_PS },
+ { atom_op_shift_left, ATOM_ARG_WS },
+ { atom_op_shift_left, ATOM_ARG_FB },
+ { atom_op_shift_left, ATOM_ARG_PLL },
+ { atom_op_shift_left, ATOM_ARG_MC },
+ { atom_op_shift_right, ATOM_ARG_REG },
+ { atom_op_shift_right, ATOM_ARG_PS },
+ { atom_op_shift_right, ATOM_ARG_WS },
+ { atom_op_shift_right, ATOM_ARG_FB },
+ { atom_op_shift_right, ATOM_ARG_PLL },
+ { atom_op_shift_right, ATOM_ARG_MC },
+ { atom_op_mul, ATOM_ARG_REG },
+ { atom_op_mul, ATOM_ARG_PS },
+ { atom_op_mul, ATOM_ARG_WS },
+ { atom_op_mul, ATOM_ARG_FB },
+ { atom_op_mul, ATOM_ARG_PLL },
+ { atom_op_mul, ATOM_ARG_MC },
+ { atom_op_div, ATOM_ARG_REG },
+ { atom_op_div, ATOM_ARG_PS },
+ { atom_op_div, ATOM_ARG_WS },
+ { atom_op_div, ATOM_ARG_FB },
+ { atom_op_div, ATOM_ARG_PLL },
+ { atom_op_div, ATOM_ARG_MC },
+ { atom_op_add, ATOM_ARG_REG },
+ { atom_op_add, ATOM_ARG_PS },
+ { atom_op_add, ATOM_ARG_WS },
+ { atom_op_add, ATOM_ARG_FB },
+ { atom_op_add, ATOM_ARG_PLL },
+ { atom_op_add, ATOM_ARG_MC },
+ { atom_op_sub, ATOM_ARG_REG },
+ { atom_op_sub, ATOM_ARG_PS },
+ { atom_op_sub, ATOM_ARG_WS },
+ { atom_op_sub, ATOM_ARG_FB },
+ { atom_op_sub, ATOM_ARG_PLL },
+ { atom_op_sub, ATOM_ARG_MC },
+ { atom_op_setport, ATOM_PORT_ATI },
+ { atom_op_setport, ATOM_PORT_PCI },
+ { atom_op_setport, ATOM_PORT_SYSIO },
+ { atom_op_setregblock, 0 },
+ { atom_op_setfbbase, 0 },
+ { atom_op_compare, ATOM_ARG_REG },
+ { atom_op_compare, ATOM_ARG_PS },
+ { atom_op_compare, ATOM_ARG_WS },
+ { atom_op_compare, ATOM_ARG_FB },
+ { atom_op_compare, ATOM_ARG_PLL },
+ { atom_op_compare, ATOM_ARG_MC },
+ { atom_op_switch, 0 },
+ { atom_op_jump, ATOM_COND_ALWAYS },
+ { atom_op_jump, ATOM_COND_EQUAL },
+ { atom_op_jump, ATOM_COND_BELOW },
+ { atom_op_jump, ATOM_COND_ABOVE },
+ { atom_op_jump, ATOM_COND_BELOWOREQUAL },
+ { atom_op_jump, ATOM_COND_ABOVEOREQUAL },
+ { atom_op_jump, ATOM_COND_NOTEQUAL },
+ { atom_op_test, ATOM_ARG_REG },
+ { atom_op_test, ATOM_ARG_PS },
+ { atom_op_test, ATOM_ARG_WS },
+ { atom_op_test, ATOM_ARG_FB },
+ { atom_op_test, ATOM_ARG_PLL },
+ { atom_op_test, ATOM_ARG_MC },
+ { atom_op_delay, ATOM_UNIT_MILLISEC },
+ { atom_op_delay, ATOM_UNIT_MICROSEC },
+ { atom_op_calltable, 0 },
+ { atom_op_repeat, 0 },
+ { atom_op_clear, ATOM_ARG_REG },
+ { atom_op_clear, ATOM_ARG_PS },
+ { atom_op_clear, ATOM_ARG_WS },
+ { atom_op_clear, ATOM_ARG_FB },
+ { atom_op_clear, ATOM_ARG_PLL },
+ { atom_op_clear, ATOM_ARG_MC },
+ { atom_op_nop, 0 },
+ { atom_op_eot, 0 },
+ { atom_op_mask, ATOM_ARG_REG },
+ { atom_op_mask, ATOM_ARG_PS },
+ { atom_op_mask, ATOM_ARG_WS },
+ { atom_op_mask, ATOM_ARG_FB },
+ { atom_op_mask, ATOM_ARG_PLL },
+ { atom_op_mask, ATOM_ARG_MC },
+ { atom_op_postcard, 0 },
+ { atom_op_beep, 0 },
+ { atom_op_savereg, 0 },
+ { atom_op_restorereg, 0 },
+ { atom_op_setdatablock, 0 },
+ { atom_op_xor, ATOM_ARG_REG },
+ { atom_op_xor, ATOM_ARG_PS },
+ { atom_op_xor, ATOM_ARG_WS },
+ { atom_op_xor, ATOM_ARG_FB },
+ { atom_op_xor, ATOM_ARG_PLL },
+ { atom_op_xor, ATOM_ARG_MC },
+ { atom_op_shl, ATOM_ARG_REG },
+ { atom_op_shl, ATOM_ARG_PS },
+ { atom_op_shl, ATOM_ARG_WS },
+ { atom_op_shl, ATOM_ARG_FB },
+ { atom_op_shl, ATOM_ARG_PLL },
+ { atom_op_shl, ATOM_ARG_MC },
+ { atom_op_shr, ATOM_ARG_REG },
+ { atom_op_shr, ATOM_ARG_PS },
+ { atom_op_shr, ATOM_ARG_WS },
+ { atom_op_shr, ATOM_ARG_FB },
+ { atom_op_shr, ATOM_ARG_PLL },
+ { atom_op_shr, ATOM_ARG_MC },
+ { atom_op_debug, 0 },
+};
+
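+/*
+ * Command table layout, per the ATOM_CT_* offsets: 16-bit table size
+ * at +0, workspace size at +4 (the interpreter allocates 4 * ws bytes
+ * for it below), parameter-space size at +5 (low 7 bits, in bytes),
+ * and the bytecode itself starting at +6.
+ */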
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+{
+ int base = CU16(ctx->cmd_table + 4 + 2 * index);
+ int len, ws, ps, ptr;
+ unsigned char op;
+ atom_exec_context ectx;
+ int ret = 0;
+
+ if (!base)
+ return -EINVAL;
+
+ len = CU16(base + ATOM_CT_SIZE_PTR);
+ ws = CU8(base + ATOM_CT_WS_PTR);
+ ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+ ptr = base + ATOM_CT_CODE_PTR;
+
+ SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+ ectx.ctx = ctx;
+ ectx.ps_shift = ps / 4;
+ ectx.start = base;
+ ectx.ps = params;
+ ectx.abort = false;
+ ectx.last_jump = 0;
+ if (ws)
+ ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+ else
+ ectx.ws = NULL;
+
+ debug_depth++;
+ while (1) {
+ op = CU8(ptr++);
+ if (op < ATOM_OP_NAMES_CNT)
+ SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+ else
+ SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+ if (ectx.abort) {
+ DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+ base, len, ws, ps, ptr - 1);
+ ret = -EINVAL;
+ goto free;
+ }
+
+ if (op < ATOM_OP_CNT && op > 0)
+ opcode_table[op].func(&ectx, &ptr,
+ opcode_table[op].arg);
+ else
+ break;
+
+ if (op == ATOM_OP_EOT)
+ break;
+ }
+ debug_depth--;
+ SDEBUG("<<\n");
+
+free:
+ if (ws)
+ kfree(ectx.ws);
+ return ret;
+}
+
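+/*
+ * Minimal caller sketch (illustrative): parameters are packed into a
+ * little-endian dword array and the table is run by its index in the
+ * master command table, e.g.
+ *
+ *   uint32_t ps = cpu_to_le32(eng_clock);
+ *   atom_execute_table(ctx, ATOM_CMD_SETSCLK, &ps);
+ *
+ * where eng_clock is a hypothetical target engine clock value.
+ */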
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+{
+ int r;
+
+ mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
+ r = atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+ return r;
+}
+
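+/*
+ * Index the BIOS IIO block: record, for each ATOM_IIO_START port id,
+ * the offset of its script body.  atom_iio_len[] gives the length of
+ * each IIO opcode so that whole scripts can be skipped over.
+ */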
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+ ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ if (!ctx->iio)
+ return;
+ while (CU8(base) == ATOM_IIO_START) {
+ ctx->iio[CU8(base + 1)] = base + 2;
+ base += 2;
+ while (CU8(base) != ATOM_IIO_END)
+ base += atom_iio_len[CU8(base)];
+ base += 3;
+ }
+}
+
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+ int base;
+ struct atom_context *ctx =
+ kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+ char *str;
+ char name[512];
+ int i;
+
+ if (!ctx)
+ return NULL;
+
+ ctx->card = card;
+ ctx->bios = bios;
+
+ if (CU16(0) != ATOM_BIOS_MAGIC) {
+ printk(KERN_INFO "Invalid BIOS magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+ if (strncmp
+ (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+ strlen(ATOM_ATI_MAGIC))) {
+ printk(KERN_INFO "Invalid ATI magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ if (strncmp
+ (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+ strlen(ATOM_ROM_MAGIC))) {
+ printk(KERN_INFO "Invalid ATOM magic.\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+ ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+ atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+ if (!ctx->iio) {
+ atom_destroy(ctx);
+ return NULL;
+ }
+
+ str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+ while (*str && ((*str == '\n') || (*str == '\r')))
+ str++;
+ /* name string isn't always 0 terminated */
+ for (i = 0; i < 511; i++) {
+ name[i] = str[i];
+ if (name[i] < '.' || name[i] > 'z') {
+ name[i] = 0;
+ break;
+ }
+ }
+ printk(KERN_INFO "ATOM BIOS: %s\n", name);
+
+ return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
+ int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+ uint32_t ps[16];
+ int ret;
+
+ memset(ps, 0, sizeof(ps));
+
+ ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+ ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+ if (!ps[0] || !ps[1])
+ return 1;
+
+ if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+ return 1;
+ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ if (ret)
+ return ret;
+
+ memset(ps, 0, sizeof(ps));
+
+ if (rdev->family < CHIP_R600) {
+ if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+ }
+ return ret;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+ kfree(ctx->iio);
+ kfree(ctx);
+}
+
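+/*
+ * Typical lookup sketch (illustrative), mirroring the use in
+ * atom_allocate_fb_scratch() below:
+ *
+ *   uint16_t size, data_start;
+ *   uint8_t frev, crev;
+ *   if (atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ *       &data_start))
+ *       table = ctx->bios + data_start;
+ *
+ * where 'table' is a hypothetical pointer to the table's payload.
+ */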
+bool atom_parse_data_header(struct atom_context *ctx, int index,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint16_t *data_start)
+{
+ int offset = index * 2 + 4;
+ int idx = CU16(ctx->data_table + offset);
+ u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+ if (!mdt[index])
+ return false;
+
+ if (size)
+ *size = CU16(idx);
+ if (frev)
+ *frev = CU8(idx + 2);
+ if (crev)
+ *crev = CU8(idx + 3);
+ *data_start = idx;
+ return true;
+}
+
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+ uint8_t *crev)
+{
+ int offset = index * 2 + 4;
+ int idx = CU16(ctx->cmd_table + offset);
+ u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+ if (!mct[index])
+ return false;
+
+ if (frev)
+ *frev = CU8(idx + 2);
+ if (crev)
+ *crev = CU8(idx + 3);
+ return true;
+}
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+ int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+ uint16_t data_offset;
+ int usage_bytes = 0;
+ struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+ if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
+ ctx->scratch_size_bytes = 0;
+ if (usage_bytes == 0)
+ usage_bytes = 20 * 1024;
+ /* allocate some scratch memory */
+ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+ if (!ctx->scratch)
+ return -ENOMEM;
+ ctx->scratch_size_bytes = usage_bytes;
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 0000000..feba6b8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#define ATOM_BIOS_MAGIC 0xAA55
+#define ATOM_ATI_MAGIC_PTR 0x30
+#define ATOM_ATI_MAGIC " 761295520"
+#define ATOM_ROM_TABLE_PTR 0x48
+
+#define ATOM_ROM_MAGIC "ATOM"
+#define ATOM_ROM_MAGIC_PTR 4
+
+#define ATOM_ROM_MSG_PTR 0x10
+#define ATOM_ROM_CMD_PTR 0x1E
+#define ATOM_ROM_DATA_PTR 0x20
+
+#define ATOM_CMD_INIT 0
+#define ATOM_CMD_SETSCLK 0x0A
+#define ATOM_CMD_SETMCLK 0x0B
+#define ATOM_CMD_SETPCLK 0x0C
+#define ATOM_CMD_SPDFANCNTL 0x39
+
+#define ATOM_DATA_FWI_PTR 0xC
+#define ATOM_DATA_IIO_PTR 0x32
+
+#define ATOM_FWI_DEFSCLK_PTR 8
+#define ATOM_FWI_DEFMCLK_PTR 0xC
+#define ATOM_FWI_MAXSCLK_PTR 0x24
+#define ATOM_FWI_MAXMCLK_PTR 0x28
+
+#define ATOM_CT_SIZE_PTR 0
+#define ATOM_CT_WS_PTR 4
+#define ATOM_CT_PS_PTR 5
+#define ATOM_CT_PS_MASK 0x7F
+#define ATOM_CT_CODE_PTR 6
+
+#define ATOM_OP_CNT 123
+#define ATOM_OP_EOT 91
+
+#define ATOM_CASE_MAGIC 0x63
+#define ATOM_CASE_END 0x5A5A
+
+#define ATOM_ARG_REG 0
+#define ATOM_ARG_PS 1
+#define ATOM_ARG_WS 2
+#define ATOM_ARG_FB 3
+#define ATOM_ARG_ID 4
+#define ATOM_ARG_IMM 5
+#define ATOM_ARG_PLL 6
+#define ATOM_ARG_MC 7
+
+#define ATOM_SRC_DWORD 0
+#define ATOM_SRC_WORD0 1
+#define ATOM_SRC_WORD8 2
+#define ATOM_SRC_WORD16 3
+#define ATOM_SRC_BYTE0 4
+#define ATOM_SRC_BYTE8 5
+#define ATOM_SRC_BYTE16 6
+#define ATOM_SRC_BYTE24 7
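+
+/* Each ATOM_SRC_* code selects a bit range within a dword:
+ * DWORD -> [31:0], WORD0 -> [15:0], WORD8 -> [23:8], WORD16 -> [31:16],
+ * BYTE0 -> [7:0], BYTE8 -> [15:8], BYTE16 -> [23:16], BYTE24 -> [31:24]
+ */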
+
+#define ATOM_WS_QUOTIENT 0x40
+#define ATOM_WS_REMAINDER 0x41
+#define ATOM_WS_DATAPTR 0x42
+#define ATOM_WS_SHIFT 0x43
+#define ATOM_WS_OR_MASK 0x44
+#define ATOM_WS_AND_MASK 0x45
+#define ATOM_WS_FB_WINDOW 0x46
+#define ATOM_WS_ATTRIBUTES 0x47
+#define ATOM_WS_REGPTR 0x48
+
+#define ATOM_IIO_NOP 0
+#define ATOM_IIO_START 1
+#define ATOM_IIO_READ 2
+#define ATOM_IIO_WRITE 3
+#define ATOM_IIO_CLEAR 4
+#define ATOM_IIO_SET 5
+#define ATOM_IIO_MOVE_INDEX 6
+#define ATOM_IIO_MOVE_ATTR 7
+#define ATOM_IIO_MOVE_DATA 8
+#define ATOM_IIO_END 9
+
+#define ATOM_IO_MM 0
+#define ATOM_IO_PCI 1
+#define ATOM_IO_SYSIO 2
+#define ATOM_IO_IIO 0x80
+
+struct card_info {
+ struct drm_device *dev;
+ void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
+};
+
+struct atom_context {
+ struct card_info *card;
+ struct mutex mutex;
+ void *bios;
+ uint32_t cmd_table, data_table;
+ uint16_t *iio;
+
+ uint16_t data_block;
+ uint32_t fb_base;
+ uint32_t divmul[2];
+ uint16_t io_attr;
+ uint16_t reg_block;
+ uint8_t shift;
+ int cs_equal, cs_above;
+ int io_mode;
+ uint32_t *scratch;
+ int scratch_size_bytes;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+ uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+ uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 0000000..0ee5737
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,8012 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/****************************************************************************/
+/*Portion I: Definitions shared between VBIOS and Driver */
+/****************************************************************************/
+
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR 0x00020000
+#define ATOM_VERSION_MINOR 0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified (via ATOM_BIG_ENDIAN) before inclusion;
+ * there is no usable default
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+ #ifndef ULONG
+ typedef unsigned long ULONG;
+ #endif
+
+ #ifndef UCHAR
+ typedef unsigned char UCHAR;
+ #endif
+
+ #ifndef USHORT
+ typedef unsigned short USHORT;
+ #endif
+#endif
+
+#define ATOM_DAC_A 0
+#define ATOM_DAC_B 1
+#define ATOM_EXT_DAC 2
+
+#define ATOM_CRTC1 0
+#define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+#define ATOM_CRTC_INVALID 0xFF
+
+#define ATOM_DIGA 0
+#define ATOM_DIGB 1
+
+#define ATOM_PPLL1 0
+#define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL0 2
+#define ATOM_EXT_PLL1 8
+#define ATOM_EXT_PLL2 9
+#define ATOM_EXT_CLOCK 10
+#define ATOM_PPLL_INVALID 0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL 0
+#define ENCODER_REFCLK_SRC_P2PLL 1
+#define ENCODER_REFCLK_SRC_DCPLL 2
+#define ENCODER_REFCLK_SRC_EXTCLK 3
+#define ENCODER_REFCLK_SRC_INVALID 0xFF
+
+#define ATOM_SCALER1 0
+#define ATOM_SCALER2 1
+
+#define ATOM_SCALER_DISABLE 0
+#define ATOM_SCALER_CENTER 1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX 3
+
+#define ATOM_DISABLE 0
+#define ATOM_ENABLE 1
+#define ATOM_LCD_BLOFF (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING 1
+#define ATOM_BLANKING_OFF 0
+
+#define ATOM_CURSOR1 0
+#define ATOM_CURSOR2 1
+
+#define ATOM_ICON1 0
+#define ATOM_ICON2 1
+
+#define ATOM_CRT1 0
+#define ATOM_CRT2 1
+
+#define ATOM_TV_NTSC 1
+#define ATOM_TV_NTSCJ 2
+#define ATOM_TV_PAL 3
+#define ATOM_TV_PALM 4
+#define ATOM_TV_PALCN 5
+#define ATOM_TV_PALN 6
+#define ATOM_TV_PAL60 7
+#define ATOM_TV_SECAM 8
+#define ATOM_TV_CV 16
+
+#define ATOM_DAC1_PS2 1
+#define ATOM_DAC1_CV 2
+#define ATOM_DAC1_NTSC 3
+#define ATOM_DAC1_PAL 4
+
+#define ATOM_DAC2_PS2 ATOM_DAC1_PS2
+#define ATOM_DAC2_CV ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL ATOM_DAC1_PAL
+
+#define ATOM_PM_ON 0
+#define ATOM_PM_STANDBY 1
+#define ATOM_PM_SUSPEND 2
+#define ATOM_PM_OFF 3
+
+/* Bit0:{=0:single, =1:dual},
+ Bit1 {=0:666RGB, =1:888RGB},
+ Bit2:3:{Grey level}
+ Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
+
+#define ATOM_PANEL_MISC_DUAL 0x00000001
+#define ATOM_PANEL_MISC_888RGB 0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_FPDI 0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_SPATIAL 0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
+
+#define MEMTYPE_DDR1 "DDR1"
+#define MEMTYPE_DDR2 "DDR2"
+#define MEMTYPE_DDR3 "DDR3"
+#define MEMTYPE_DDR4 "DDR4"
+
+#define ASIC_BUS_TYPE_PCI "PCI"
+#define ASIC_BUS_TYPE_AGP "AGP"
+#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS"
+
+/* Maximum size of the FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
+
+#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+
+#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE 2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
+
+#pragma pack(1) /* BIOS data must use byte alignment */
+
+/* Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
+
+/* Common header for all ROM data tables.
+ Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header,
+ and the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+ USHORT usStructureSize;
+ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
+ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
+ /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+/****************************************************************************/
+// Structure stores the ROM header.
+/****************************************************************************/
+typedef struct _ATOM_ROM_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
+ atombios should init it as "ATOM", don't change the position */
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+}ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef UEFI_BUILD
+ #define UTEMP USHORT
+ #define USHORT void*
+#endif
+
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
+ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
+ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
+ USHORT DIGxEncoderControl; //Only used by Bios
+ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
+ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
+ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT GPIOPinControl; //Atomic Table, only used by Bios
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryPLLInit; //Atomic Table, used only by Bios
+ USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+ USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT GetConditionalGoldenSetting; //Only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT PatchMCSetting; //only used by BIOS
+ USHORT MC_SEQ_Control; //only used by BIOS
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
+ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+ USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
+ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
+ USHORT MemoryCleanUp; //Atomic Table, only used by Bios
+ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
+ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
+ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
+ USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
+ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
+ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
+ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
+ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryTraining; //Atomic Table, used only by Bios
+ USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT ComputeMemoryClockParam; //Function Table, only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
+ USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
+#define DPTranslatorControl DIG2EncoderControl
+#define UNIPHYTransmitterControl DIG1TransmitterControl
+#define LVTMATransmitterControl DIG2TransmitterControl
+#define SetCRTC_DPM_State GetConditionalGoldenSetting
+#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService ReadHWAssistedI2CStatus
+#define EnableVGA_Access GetSCLKOverMCLKRatio
+#define EnableYUV GetDispObjectInfo
+#define DynamicClockGating EnableDispPowerGating
+#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
+
+#define TMDSAEncoderControl PatchMCSetting
+#define LVDSEncoderControl MC_SEQ_Control
+#define LCD1OutputControl HW_Misc_Operation
+
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+#else
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+ ATOM_TABLE_ATTRIBUTE sbfAccess;
+ USHORT susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER CommonHeader;
+ ATOM_TABLE_ATTRIBUTE TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
+#define COMPUTE_MEMORY_PLL_PARAM 1
+#define COMPUTE_ENGINE_PLL_PARAM 2
+#define ADJUST_MC_SETTING_PARAM 3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulClockFreq:24;
+#else
+ ULONG ulClockFreq:24;
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG 0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+ ULONG ulClock; //On return, it's the re-calculated clock based on the given Fb_div, Post_Div and ref_div
+ UCHAR ucAction; //0:reserved //1:Memory //2:Engine
+ UCHAR ucReserved; //may expand to return larger Fbdiv later
+ UCHAR ucFbDiv; //return value
+ UCHAR ucPostDiv; //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+ ULONG ulClock; //On return, [23:0] holds the real clock
+ UCHAR ucAction; //0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine. Returns ref_div to be written to the register
+ USHORT usFbDiv; //return Feedback value to be written to register
+ UCHAR ucPostDiv; //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+
+#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+ ULONG ulClockFreq:24; // in unit of 10kHz
+#else
+ ULONG ulClockFreq:24; // in unit of 10kHz
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+ USHORT usFbDivFrac;
+ USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ UCHAR ucCntlFlag; //Output Parameter
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8
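+
+// Hedged usage sketch (not part of the VBIOS-defined interface above): a
+// minimal example of asking ComputeMemoryEnginePLL, with the V3 parameter
+// layout, for engine PLL dividers. The driver-side helpers
+// atom_execute_table() and GetIndexIntoMasterTable() are assumed from
+// atom.h, and cpu_to_le32() from the kernel; the example_* function name is
+// hypothetical. Kept out of the build as illustration only.
+#if 0
+static void example_compute_engine_pll(struct atom_context *ctx, uint32_t clock_10khz)
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args;
+  int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+
+  memset(&args, 0, sizeof(args));
+  /* pack the flag into [31:24] and the clock (10 kHz units) into [23:0];
+   * going through ulClockParams keeps the access endian-safe */
+  args.ulClockParams = cpu_to_le32((COMPUTE_ENGINE_PLL_PARAM << 24) | clock_10khz);
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+  /* on return args.ulFbDiv, args.ucRefDiv and args.ucPostDiv hold the
+   * dividers to program, per the output-parameter comments above */
+}
+#endif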
+
+
+// V4 is only used for APUs whose PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+#else
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+ ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ union
+ {
+ UCHAR ucCntlFlag; //Output Flags
+ UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+ };
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
+// used by ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+ union
+ {
+ ULONG ulClock;
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+ };
+ UCHAR ucDllSpeed; //Output
+ UCHAR ucPostDiv; //Output
+ union{
+ UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+ UCHAR ucPllCntlFlag; //Output:
+ };
+ UCHAR ucBWCntl;
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL is misnamed; it should be BYPASS_DQ_PLL (alias kept for compatibility)
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulMemoryClock;
+ ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
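+
+// Hedged usage sketch: setting the engine clock through SetEngineClockTable,
+// mirroring what a driver-side wrapper might do. atom_execute_table() and
+// GetIndexIntoMasterTable() are assumed from atom.h; illustrative only.
+#if 0
+static void example_set_engine_clock(struct atom_context *ctx, uint32_t eng_clock_10khz)
+{
+  SET_ENGINE_CLOCK_PS_ALLOCATION args;
+  int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+  memset(&args, 0, sizeof(args));
+  args.ulTargetEngineClock = cpu_to_le32(eng_clock_10khz);  /* 10 kHz units */
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif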
+
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+ ASIC_INIT_PARAMETERS sASICInitClocks;
+ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
+{
+ UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+ UCHAR ucMisc; //Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb 0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+ DAC_LOAD_DETECTION_PARAMETERS sDacload;
+ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
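+
+// Hedged usage sketch: triggering DAC load detection for a CRT on DAC A.
+// ATOM_DEVICE_CRT1_SUPPORT and ATOM_DAC_A are defined earlier in this header;
+// atom_execute_table()/GetIndexIntoMasterTable() are assumed from atom.h.
+// The detection result is read back from the BIOS scratch registers
+// afterwards. Illustrative only.
+#if 0
+static void example_dac_load_detect(struct atom_context *ctx)
+{
+  DAC_LOAD_DETECTION_PS_ALLOCATION args;
+  int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+
+  memset(&args, 0, sizeof(args));
+  args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+  args.sDacload.ucDacType = ATOM_DAC_A;
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+  /* the caller then inspects the load-detect bits in the BIOS scratch regs */
+}
+#endif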
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+// DIG2EncoderControlTable
+// ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucConfig;
+ // [2] Link Select:
+ // =0: PHY linkA if bfLanes<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [3] Transmitter Sel
+ // =0: UNIPHY or PCIEPHY
+ // =1: LVTMA
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_ENCODER_CONFIG_LINKA 0x00
+#define ATOM_ENCODER_CONFIG_LINKB 0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK 0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY 0x00
+#define ATOM_ENCODER_CONFIG_LVTMA 0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
+#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS internal use only; external SW should set this bit to 0
+// ucAction
+// ATOM_ENABLE: Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP 0
+#define ATOM_ENCODER_MODE_LVDS 1
+#define ATOM_ENCODER_MODE_DVI 2
+#define ATOM_ENCODER_MODE_HDMI 3
+#define ATOM_ENCODER_MODE_SDVO 4
+#define ATOM_ENCODER_MODE_DP_AUDIO 5
+#define ATOM_ENCODER_MODE_TV 13
+#define ATOM_ENCODER_MODE_CV 14
+#define ATOM_ENCODER_MODE_CRT 15
+#define ATOM_ENCODER_MODE_DVO 16
+#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
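+
+// Hedged usage sketch: enabling a DIG encoder for single-link DVI with the
+// v1 parameter layout above. ATOM_ENABLE is defined earlier in this header;
+// atom_execute_table()/GetIndexIntoMasterTable() are assumed from atom.h.
+// Illustrative only.
+#if 0
+static void example_dig1_encoder_enable(struct atom_context *ctx, uint32_t pixel_clock_khz)
+{
+  DIG_ENCODER_CONTROL_PARAMETERS args;
+  int index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+
+  memset(&args, 0, sizeof(args));
+  args.usPixelClock = cpu_to_le16(pixel_clock_khz / 10); /* 10 kHz units */
+  args.ucConfig = ATOM_ENCODER_CONFIG_LINKA | ATOM_ENCODER_CONFIG_UNIPHY;
+  args.ucAction = ATOM_ENABLE;
+  args.ucEncoderMode = ATOM_ENCODER_MODE_DVI;
+  args.ucLaneNum = 4;  /* single link */
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif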
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:2;
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucReserved:1;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:1;
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK 0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA 0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB 0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK 0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1 0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
+#define ATOM_ENCODER_CMD_SETUP 0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// The driver uses the ENABLE sub-function below when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved:3;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:3;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+ UCHAR ucAction;
+ union {
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+ // =0: external DP
+ // =1: internal DP2
+ // =0x11: internal DP1 for NutMeg/Travis DP translator
+ };
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// The driver uses the ENABLE sub-function below when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved:2;
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= changed compared to the previous version
+#else
+ UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz <= changed compared to the previous version
+ UCHAR ucReserved:2;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In the register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ union{
+ ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction;
+ union {
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucPanelMode; // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+ // =0: external DP
+ // =1: internal DP2
+ // =0x11: internal DP1 for NutMeg/Travis DP translator
+ };
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucHPD_ID; // HPD ID (1-6); =0 means skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE 0x00
+#define PANEL_6BIT_PER_COLOR 0x01
+#define PANEL_8BIT_PER_COLOR 0x02
+#define PANEL_10BIT_PER_COLOR 0x03
+#define PANEL_12BIT_PER_COLOR 0x04
+#define PANEL_16BIT_PER_COLOR 0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE 0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
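+
+// Hedged usage sketch: programming the DP panel mode through the v4 encoder
+// control (e.g. for a NutMeg/Travis translator), using the panel-mode
+// defines above. Helpers atom_execute_table()/GetIndexIntoMasterTable() are
+// assumed from atom.h; illustrative only.
+#if 0
+static void example_dig_set_panel_mode(struct atom_context *ctx, uint8_t dig_id,
+                                       uint8_t panel_mode)
+{
+  DIG_ENCODER_CONTROL_PARAMETERS_V4 args;
+  int index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+
+  memset(&args, 0, sizeof(args));
+  args.ucAction = ATOM_ENCODER_CMD_SETUP_PANEL_MODE;
+  args.ucPanelMode = panel_mode;     /* e.g. DP_PANEL_MODE_INTERNAL_DP1_MODE */
+  args.acConfig.ucDigSel = dig_id;   /* DIG0..DIG6 */
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif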
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+// LVTMATransmitterControlTable
+// DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+ UCHAR ucLaneSel;
+ UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ USHORT usInitInfo; // when initializing UNIPHY, the lower 8 bits hold the connector type defined in ObjectID.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ UCHAR ucConfig;
+ // [0]=0: 4 lane Link,
+ // =1: 8 lane Link ( Dual Links TMDS )
+ // [1]=0: InCoherent mode
+ // =1: Coherent Mode
+ // [2] Link Select:
+ // =0: PHY linkA if bfLanes<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [5:4]PCIE lane Sel
+ // =0: lane 0~3 or 0~7
+ // =1: lane 4~7
+ // =2: lane 8~11 or 8~15
+ // =3: lane 12~15
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+//ucConfig
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE 0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN 0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK 0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3 0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7 0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7 0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11 0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE 0
+#define ATOM_TRANSMITTER_ACTION_ENABLE 1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON 3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START 5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP 6
+#define ATOM_TRANSMITTER_ACTION_INIT 7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT 8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
+#define ATOM_TRANSMITTER_ACTION_SETUP 10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucReserved:1;
+ UCHAR fDPConnector:1; //bit4=0: DP connector =1: non-DP connector
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR fDPConnector:1; //bit4=0: DP connector =1: non-DP connector
+ UCHAR ucReserved:1;
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ USHORT usInitInfo; // when initializing UNIPHY, the lower 8 bits hold the connector type defined in ObjectID.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+ UCHAR ucAction; // defined as ATOM_TRANSMITTER_ACTION_XXX
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
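+
+// Hedged usage sketch: initializing a UNIPHY transmitter with the v2 layout,
+// passing the connector object id through usInitInfo as described above.
+// Helpers atom_execute_table()/GetIndexIntoMasterTable() are assumed from
+// atom.h; illustrative only.
+#if 0
+static void example_uniphy_init(struct atom_context *ctx, uint8_t connector_object_id)
+{
+  DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 args;
+  int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+
+  memset(&args, 0, sizeof(args));
+  args.usInitInfo = cpu_to_le16(connector_object_id); /* lower 8 bits used */
+  args.ucAction = ATOM_TRANSMITTER_ACTION_INIT;
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif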
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ USHORT usInitInfo; // when initializing UNIPHY, the lower 8 bits hold the connector type defined in ObjectID.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+ UCHAR ucAction; // defined as ATOM_TRANSMITTER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
+
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+ UCHAR ucLaneSel;
+ union
+ {
+ UCHAR ucLaneSet;
+ struct {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+#else
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+#endif
+ };
+ };
+}ATOM_DP_VS_MODE_V4;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ USHORT usInitInfo; // when initializing UNIPHY, the lower 8 bits hold the connector type defined in ObjectID.h
+ ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode; redefined compared to the previous version
+ };
+ union
+ {
+ ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction; // defined as ATOM_TRANSMITTER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReservd1:1;
+ UCHAR ucHPDSel:3;
+ UCHAR ucPhyClkSrcId:2;
+ UCHAR ucCoherentMode:1;
+ UCHAR ucReserved:1;
+#else
+ UCHAR ucReserved:1;
+ UCHAR ucCoherentMode:1;
+ UCHAR ucPhyClkSrcId:2;
+ UCHAR ucHPDSel:3;
+ UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+ USHORT usSymClock; // Encoder clock in 10kHz: (DP mode) = link clock/10; (TMDS/LVDS/HDMI) = pixel clock; (HDMI deep color) = pixel clock * deep_color_ratio
+ UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
+ UCHAR ucAction; // defined as ATOM_TRANSMITTER_ACTION_xxx
+ UCHAR ucLaneNum; // indicate lane number 1-8
+ UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
+ UCHAR ucDigMode; // indicate DIG mode
+ union{
+ ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucDigEncoderSel; // indicate DIG front end encoder
+ UCHAR ucDPLaneSet;
+ UCHAR ucReserved;
+ UCHAR ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA 0
+#define ATOM_PHY_ID_UNIPHYB 1
+#define ATOM_PHY_ID_UNIPHYC 2
+#define ATOM_PHY_ID_UNIPHYD 3
+#define ATOM_PHY_ID_UNIPHYE 4
+#define ATOM_PHY_ID_UNIPHYF 5
+#define ATOM_PHY_ID_UNIPHYG 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V 0x00
+#define DP_LANE_SET__0DB_0_6V 0x01
+#define DP_LANE_SET__0DB_0_8V 0x02
+#define DP_LANE_SET__0DB_1_2V 0x03
+#define DP_LANE_SET__3_5DB_0_4V 0x08
+#define DP_LANE_SET__3_5DB_0_6V 0x09
+#define DP_LANE_SET__3_5DB_0_8V 0x0a
+#define DP_LANE_SET__6DB_0_4V 0x10
+#define DP_LANE_SET__6DB_0_6V 0x11
+#define DP_LANE_SET__9_5DB_0_4V 0x18
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
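+
+// Hedged usage sketch: programming DP voltage swing / pre-emphasis with the
+// v1.5 layout, using the ucDPLaneSet encodings above. Helpers
+// atom_execute_table()/GetIndexIntoMasterTable() are assumed from atom.h;
+// illustrative only.
+#if 0
+static void example_uniphy_v5_set_vsemph(struct atom_context *ctx, uint8_t phy_id,
+                                         uint8_t lane_set)
+{
+  DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 args;
+  int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+
+  memset(&args, 0, sizeof(args));
+  args.ucPhyId = phy_id;             /* ATOM_PHY_ID_UNIPHYx */
+  args.ucAction = ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH;
+  args.ucDigMode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+  args.ucDPLaneSet = lane_set;       /* e.g. DP_LANE_SET__0DB_0_4V */
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif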
+
+
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+ union{
+ USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
+ USHORT usConnectorId; // connector id, valid when ucAction = INIT
+ };
+ UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucAction; //
+ UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+ UCHAR ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+ ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
+/****************************************************************************/
+// Structures used by DAC1OutputControlTable
+// DAC2OutputControlTable
+// LVTMAOutputControlTable (Before DEC30)
+// TMDSAOutputControlTable (Before DEC30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+ UCHAR ucAction; // Possible input: ATOM_ENABLE || ATOM_DISABLE
+ // When the display is LCD, in addition to above:
+ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
+ // ATOM_LCD_SELFTEST_STOP
+
+ UCHAR aucPadding[3]; // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
+ USHORT usBlackColorRCr;
+ USHORT usBlackColorGY;
+ USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+// EnableCRTCMemReqTable
+// UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+ USHORT usOverscanRight; // right
+ USHORT usOverscanLeft; // left
+ USHORT usOverscanBottom; // bottom
+ USHORT usOverscanTop; // top
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+ UCHAR ucH_Replication; // horizontal replication
+ UCHAR ucV_Replication; // vertical replication
+ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+ UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID 0x00
+//#define ASIC_INT_TV_ENCODER_ID 0x02
+//#define ASIC_INT_DIG1_ENCODER_ID 0x03
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04
+//#define ASIC_EXT_TV_ENCODER_ID 0x06
+//#define ASIC_INT_DVO_ENCODER_ID 0x07
+//#define ASIC_INT_DIG2_ENCODER_ID 0x09
+//#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP 0
+//#define ATOM_ENCODER_MODE_LVDS 1
+//#define ATOM_ENCODER_MODE_DVI 2
+//#define ATOM_ENCODER_MODE_HDMI 3
+//#define ATOM_ENCODER_MODE_SDVO 4
+//#define ATOM_ENCODER_MODE_TV 13
+//#define ATOM_ENCODER_MODE_CV 14
+//#define ATOM_ENCODER_MODE_CRT 15
+
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+// GetPixelClockTable
+/****************************************************************************/
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1, Minor revision=2, adds ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK 0xF0
+#define MISC_DEVICE_INDEX_SHIFT 4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1, Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV 13
+//ATOM_ENCODER_MODE_CV 14
+//ATOM_ENCODER_MODE_CRT 15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+//#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL. For the VGA PPLL, make sure this value is not 0.
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
+ union
+ {
+ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+ UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit
+ };
+ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+ // bit[4]=0: use XTALIN as the source of reference divider, =1: use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
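+
+// Hedged usage sketch: programming a PPLL with the v3 pixel clock layout,
+// with dividers typically obtained from ComputeMemoryEnginePLL or
+// AdjustDisplayPll first. Helpers atom_execute_table()/
+// GetIndexIntoMasterTable() are assumed from atom.h; illustrative only.
+#if 0
+static void example_set_pixel_clock_v3(struct atom_context *ctx, uint32_t clock_khz,
+                                       uint16_t ref_div, uint16_t fb_div,
+                                       uint8_t post_div, uint8_t pll_id,
+                                       uint8_t encoder_id, uint8_t encoder_mode)
+{
+  PIXEL_CLOCK_PARAMETERS_V3 args;
+  int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+
+  memset(&args, 0, sizeof(args));
+  args.usPixelClock = cpu_to_le16(clock_khz / 10); /* 10 kHz units */
+  args.usRefDiv = cpu_to_le16(ref_div);
+  args.usFbDiv = cpu_to_le16(fb_div);
+  args.ucPostDiv = post_div;
+  args.ucPpll = pll_id;                /* ATOM_PPLL1 or ATOM_PPLL2 */
+  args.ucTransmitterId = encoder_id;
+  args.ucEncoderMode = encoder_mode;
+  args.ucMiscInfo = PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+}
+#endif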
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+ UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+ union{
+ UCHAR ucReserved;
+ UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed.
+ };
+ USHORT usPixelClock; // target the pixel clock to drive the CRTC timing
+ // 0 means disable PPLL/DCPLL.
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+#else
+ ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
+ // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+ ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
+ // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+ union{
+ CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
+ ULONG ulDispEngClkFreq; // dispclk frequency
+ };
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth (see the PIXEL_CLOCK_V6_MISC_HDMI_xxBPP defines below)
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+ UCHAR ucStatus;
+ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+ UCHAR ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+ USHORT usPixelClock;
+ UCHAR ucTransmitterID;
+ UCHAR ucEncodeMode;
+ union
+ {
+ UCHAR ucDVOConfig; //if DVO, pass the link rate and whether the output is low 12-bit or 24-bit
+ UCHAR ucConfig; //if not DVO, not yet defined
+ };
+ UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+ USHORT usPixelClock; // target pixel clock
+ UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
+ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+ UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
+ UCHAR ucExtTransmitterID; // external encoder id.
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+ ULONG ulDispPllFreq; // returns the display PPLL freq used to generate the pixel clock and the related idclk, symclk, etc.
+ UCHAR ucRefDiv; // if non-zero, it is used to calculate the other PPLL parameters, fb_divider and post_div (if not given)
+ UCHAR ucPostDiv; // if non-zero, it is used to calculate the other PPLL parameter, fb_divider
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+ union
+ {
+ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
+ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+ };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
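+
+// Hedged usage sketch: querying AdjustDisplayPll (v3) for the adjusted PLL
+// frequency before programming the pixel clock; the same allocation serves
+// as input and output through the union above. Helpers atom_execute_table()/
+// GetIndexIntoMasterTable() and le32_to_cpu() are assumed; illustrative only.
+#if 0
+static uint32_t example_adjust_display_pll_v3(struct atom_context *ctx,
+                                              uint32_t clock_khz,
+                                              uint8_t encoder_id, uint8_t encoder_mode)
+{
+  ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 args;
+  int index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+
+  memset(&args, 0, sizeof(args));
+  args.sInput.usPixelClock = cpu_to_le16(clock_khz / 10);
+  args.sInput.ucTransmitterID = encoder_id;
+  args.sInput.ucEncodeMode = encoder_mode;
+  args.sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE;
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+  return le32_to_cpu(args.sOutput.ulDispPllFreq) * 10; /* back to kHz */
+}
+#endif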
+
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
+ UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/
+// Following Structures and constant may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//The read operation succeeded when the parameter space is non-zero; otherwise it failed.
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usVRAMAddress; //Address in Frame Buffer where to place the raw EDID
+ USHORT usStatus; //When used as output: lower byte is the EDID checksum, high byte the hardware status
+ //When used as input: lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+ UCHAR ucSlaveAddr; //Read from which slave
+ UCHAR ucLineNumber; //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
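+
+// Hedged usage sketch: reading an EDID block through the HW-assisted I2C
+// table (flagged above as possibly obsolete; the radeon driver normally uses
+// its own I2C algorithm instead). Helpers atom_execute_table()/
+// GetIndexIntoMasterTable() are assumed from atom.h; the prescale value and
+// frame-buffer offset here are placeholders. Illustrative only.
+#if 0
+static void example_read_edid_hw_i2c(struct atom_context *ctx)
+{
+  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS args;
+  int index = GetIndexIntoMasterTable(COMMAND, ReadEDIDFromHWAssistedI2C);
+
+  memset(&args, 0, sizeof(args));
+  args.usPrescale = cpu_to_le16(50);    /* engine clk / I2C clk ratio (placeholder) */
+  args.usVRAMAddress = cpu_to_le16(0);  /* frame-buffer offset for the raw EDID */
+  args.usStatus = cpu_to_le16(128);     /* input: bytes to read */
+  args.ucSlaveAddr = 0xA0;              /* standard EDID slave address */
+  args.ucLineNumber = 0;                /* HW-assisted I2C line */
+  atom_execute_table(ctx, index, (uint32_t *)&args);
+  /* output: low byte of usStatus = EDID checksum, high byte = HW status */
+}
+#endif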
+
+
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
+#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
+#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
+#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usByteOffset; //Write to which byte
+ //Upper portion of usByteOffset is Format of data
+ //1bytePS+offsetPS
+ //2bytesPS+offsetPS
+ //blockID+offsetPS
+ //blockID+offsetID
+ //blockID+counterID+offsetID
+ UCHAR ucData; //PS data1
+ UCHAR ucStatus; //Status byte: 1=success, 2=failure; also used as PS data2
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value; 0: detected, 1: not detected
+ UCHAR ucPwrBehaviorId;
+ USHORT usPwrBudget; //how much power the system currently boots to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value; 0: detected, 1: not detected
+ UCHAR ucReserved;
+ USHORT usPwrBudget; //how much power the system currently boots to, in units of watts
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStepSize_Delay; //bits [3:2] SS_STEP_SIZE; bits [6:4] SS_DELAY
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
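+
+// Illustrative sketch, not part of the original header: packing the
+// ucSpreadSpectrumStepSize_Delay byte as described above (SS_STEP_SIZE in
+// bits [3:2], SS_DELAY in bits [6:4]). The helper name is hypothetical; only
+// the bit positions come from the field comment.
+static inline UCHAR example_pack_ss_step_delay(UCHAR step_size, UCHAR delay)
+{
+  return (UCHAR)(((step_size & 0x03) << 2) | ((delay & 0x07) << 4));
+}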
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
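+
+// Illustrative sketch, not part of the original header: splitting
+// usSpreadSpectrumAmount into its FBDIV and NFRAC_SLIP fields with the V2
+// masks above. The helper names are hypothetical.
+static inline UCHAR example_ss_amount_fbdiv(USHORT amount)
+{
+  return (UCHAR)((amount & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) >> ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT);
+}
+
+static inline UCHAR example_ss_amount_nfrac(USHORT amount)
+{
+  return (UCHAR)((amount & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK) >> ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT);
+}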
+
+// Used by DCE5.0
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+ USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable (Before DCE30)
+// LVTMAEncoderControlTable (Before DCE30)
+// TMDSAEncoderControlTable (Before DCE30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // bit0=0: Enable single link
+ // =1: Enable dual link
+ // Bit1=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ UCHAR ucTruncate; // bit0=0: Disable truncate
+ // =1: Enable truncate
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucSpatial; // bit0=0: Disable spatial dithering
+ // =1: Enable spatial dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucTemporal; // bit0=0: Disable temporal dithering
+ // =1: Enable temporal dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ // bit5=0: Gray level 2
+ // =1: Gray level 4
+ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
+ // =1: 25FRC_SEL pattern F
+ // bit6:5=0: 50FRC_SEL pattern A
+ // =1: 50FRC_SEL pattern B
+ // =2: 50FRC_SEL pattern C
+ // =3: 50FRC_SEL pattern D
+ // bit7=0: 75FRC_SEL pattern E
+ // =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+ UCHAR ucEnable; // Enable or Disable External TMDS encoder
+ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+ UCHAR ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
+
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: the structure is not changed, but usMisc adds bit 1 as another input for
+// bit1=0: non-coherent mode
+// =1: coherent mode
+
+//==========================================================================================
+//Only this block needs to change next time the encoder parameter definitions are revised!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL 0x01
+#define PANEL_ENCODER_MISC_COHERENT 0x02
+#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
+#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN 0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20
+#define PANEL_ENCODER_25FRC_MASK 0x10
+#define PANEL_ENCODER_25FRC_E 0x00
+#define PANEL_ENCODER_25FRC_F 0x10
+#define PANEL_ENCODER_50FRC_MASK 0x60
+#define PANEL_ENCODER_50FRC_A 0x00
+#define PANEL_ENCODER_50FRC_B 0x20
+#define PANEL_ENCODER_50FRC_C 0x40
+#define PANEL_ENCODER_50FRC_D 0x60
+#define PANEL_ENCODER_75FRC_MASK 0x80
+#define PANEL_ENCODER_75FRC_E 0x00
+#define PANEL_ENCODER_75FRC_F 0x80
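+
+// Illustrative sketch, not part of the original header: requesting 888RGB
+// spatial dithering plus gray-level-4 temporal dithering in
+// LVDS_ENCODER_CONTROL_PARAMETERS_V2 with the PANEL_ENCODER_* masks above.
+// The helper name is hypothetical.
+static inline void example_enable_lvds_dither(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *p)
+{
+  p->ucSpatial  = PANEL_ENCODER_SPATIAL_DITHER_EN | PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+  p->ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN | PANEL_ENCODER_TEMPORAL_DITHER_DEPTH |
+                  PANEL_ENCODER_TEMPORAL_LEVEL_4;
+}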
+
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
+#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
+#define SET_VOLTAGE_INIT_MODE 5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
+
+#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
+
+typedef struct _SET_VOLTAGE_PARAMETERS
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // To set all, to set source A or source B or ...
+ UCHAR ucVoltageIndex; // An index to tell which voltage level
+ UCHAR ucReserved;
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V2
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // Not used; may be used as a state machine for different power modes
+ USHORT usVoltageLevel; // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+
+typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Indicate action: Set voltage level
+ USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC 1
+#define VOLTAGE_TYPE_MVDDC 2
+#define VOLTAGE_TYPE_MVDDQ 3
+#define VOLTAGE_TYPE_VDDCI 4
+
+//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
+#define ATOM_SET_VOLTAGE 0 //Set voltage Level
+#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
+#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
+#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
+
+// define virtual voltage IDs in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
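+
+// Illustrative sketch, not part of the original header: a SetVoltageTable
+// v1.3 request that programs VDDC using a virtual voltage ID instead of a
+// millivolt value, per the constants above. The helper name is hypothetical.
+static inline void example_set_vddc_virtual(SET_VOLTAGE_PARAMETERS_V1_3 *p)
+{
+  p->ucVoltageType  = VOLTAGE_TYPE_VDDC;
+  p->ucVoltageMode  = ATOM_SET_VOLTAGE;          // action: set voltage level
+  p->usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;  // virtual ID, not mV
+}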
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
+
+// Newly added from SI for GetVoltageInfoTable: input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+ ULONG ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable: output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+ ULONG ulVotlageGpioState;
+ ULONG ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable: output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+ USHORT usVoltageLevel;
+ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
+ ULONG ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define ATOM_GET_VOLTAGE_VID 0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
+// for SI, this state maps to the 0xff02 voltage state in the PowerPlay table, which is the power boost state
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+
+// for SI, this state maps to the 0xff01 voltage state in the PowerPlay table, which is the performance state
+#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+// undefined power state
+#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
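+
+// Illustrative sketch, not part of the original header: a GetVoltageInfoTable
+// v1.1 input block asking for the state-1 (performance) leakage VID. The
+// helper name is hypothetical, and passing a virtual voltage ID in
+// usVoltageLevel is an assumption based on the "Leakage Id" field comment.
+static inline void example_query_leakage_vid(GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 *p)
+{
+  p->ucVoltageType  = VOLTAGE_TYPE_VDDC;
+  p->ucVoltageMode  = ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID;
+  p->usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;
+  p->ulReserved     = 0;
+}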
+
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+ USHORT UtilityPipeLine; // Offset for the utility to get parser info. Don't change this position!
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from BIOS, need to include the table to build BIOS
+ USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1, not configurable from BIOS, need to include the table to build BIOS
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+ USHORT PaletteData; // Only used by BIOS
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
+ USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
+ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
+ USHORT VESA_ToInternalModeLUT; // Only used by Bios
+ USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
+ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
+ USHORT CompassionateData; // Will be obsolete from R600
+ USHORT SaveRestoreInfo; // Only used by Bios
+ USHORT PPLL_SS_Info; // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
+ USHORT XTMDS_Info; // Will be obsolete from R600
+ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+ USHORT Object_Header; // Shared by various SW components,latest version 1.1
+ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!!
+ USHORT MC_InitParameter; // Only used by command table
+ USHORT ASIC_VDDC_Info; // Will be obsolete from R600
+ USHORT ASIC_InternalSS_Info; // New table name from R600, used to be called "ASIC_MVDDC_Info"
+ USHORT TV_VideoMode; // Only used by command table
+ USHORT VRAM_Info; // Only used by command table, latest version 1.3
+ USHORT MemoryTrainingInfo; // Used by VBIOS and the Diag utility for memory training purposes since R600. The new table revision starts from 2.1
+ USHORT IntegratedSystemInfo; // Shared by various SW components
+ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
+ USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
+// For backward compatibility
+#define LVDS_Info LCD_Info
+#define DAC_Info PaletteData
+#define TMDS_Info DIGTransmitterInfo
+
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // HW info table signature string "$ATI"
+ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+ UCHAR ucVideoPortInfo; // Provides the video port capabilities
+ UCHAR ucHostPortInfo; // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // MM info table signature string "$MMT"
+ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+ UCHAR ucProductID; // Defines as OEM ID or ATI board ID dependent on product type setting
+ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3)
+ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
+// Others: Reserved
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; this one will retire soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
+#if ATOM_BIG_ENDIAN
+ USHORT Reserved:1;
+ USHORT SCL2Redefined:1;
+ USHORT PostWithoutModeSet:1;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT GPUControlsBL:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT DualCRTC_Support:1;
+ USHORT FirmwarePosted:1;
+#else
+ USHORT FirmwarePosted:1;
+ USHORT DualCRTC_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT GPUControlsBL:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+ USHORT PostWithoutModeSet:1;
+ USHORT SCL2Redefined:1;
+ USHORT Reserved:1;
+#endif
+}ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ ATOM_FIRMWARE_CAPABILITY sbfAccess;
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
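+
+// Illustrative sketch, not part of the original header: checking a firmware
+// capability through the access union above. Masking the raw USHORT with the
+// ATOM_BIOS_INFO_* defines avoids depending on the soon-to-retire bitfield
+// layout. The helper name is hypothetical.
+static inline int example_has_dual_crtc(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
+{
+  return (cap.susAccess & ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT) != 0;
+}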
+
+typedef struct _ATOM_FIRMWARE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucPadding[3]; //Don't use them
+ ULONG aulReservedForBIOS[3]; //Don't use them
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!!
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS[2]; //Don't use them
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS; //Don't use them
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+//the structure below is to be used from Cypress
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved1;
+ ULONG ulReserved2;
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit
+ UCHAR ucReserved1; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise the input clock comes from the external spread clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+//the structure below is to be used from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved[2];
+ ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+ ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
+ UCHAR ucReserved3; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ UCHAR ucRemoteDisplayConfig;
+ UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise the input clock comes from the external spread clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved9[3];
+ USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ USHORT usReserved12;
+ ULONG ulReserved10[3]; // Newly added compared to the previous version
+}ATOM_FIRMWARE_INFO_V2_2;
+
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
+
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE 0x00
+#define REMOTE_DISPLAY_ENABLE 0x01
+
+/****************************************************************************/
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
+#define IGP_CAP_FLAG_AC_CARD 0x4
+#define IGP_CAP_FLAG_SDVO_CARD 0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulBootUpMemoryClock; //in 10kHz unit
+ ULONG ulMaxSystemMemoryClock; //in 10kHz unit
+ ULONG ulMinSystemMemoryClock; //in 10kHz unit
+ UCHAR ucNumberOfCyclesInPeriodHi;
+ UCHAR ucLCDTimingSel; //=0: not valid; !=0: select this timing descriptor from the LCD EDID.
+ USHORT usReserved1;
+ USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage
+ USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage
+ ULONG ulReserved[2];
+
+ USHORT usFSBClock; //In MHz unit
+ USHORT usCapabilityFlag; //Bit0=1 indicates fake HDMI support; Bit1=0/1 for dynamic clocking disable/enable
+ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+ //Bit[4]==1: P/2 mode, ==0: P/1 mode
+ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+ USHORT usK8MemoryClock; //in MHz unit
+ USHORT usK8SyncStartDelay; //in 0.01 us unit
+ USHORT usK8DataReturnTime; //in 0.01 us unit
+ UCHAR ucMaxNBVoltage;
+ UCHAR ucMinNBVoltage;
+ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+ UCHAR ucHTLinkWidth; //16 bit vs. 8 bit
+ UCHAR ucMaxNBVoltageHigh;
+ UCHAR ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation of entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
+ For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+ For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+ For AMD IGP,for now this can be 0
+
+usFSBClock: For Intel IGP,it's FSB Freq
+ For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod: Indicates how many cycles when the PWM duty is 100%; low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicates how many cycles when the PWM duty is 100%; high 8 bits of the value. If the PWM has an inverter, set bit [7]==1, otherwise set it to 0
+
+ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+
+usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >= Min NB voltage but <= InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >= InterNBVoltageLow but <= Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
+
+
+/*
+The following IGP table is introduced from RS780. It is supposed to be put in the FB by SBIOS before the IGP VBIOS starts VPOST;
+then the VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
+The generous reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before
+*/
+
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulReserved1[2]; //must be 0x0 for the reserved
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulBootUpSidePortClock; //in 10kHz unit
+ ULONG ulMinSidePortClock; //in 10kHz unit
+ ULONG ulReserved2[6]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //see explanation below
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulDDISlot1Config;
+ ULONG ulDDISlot2Config;
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ UCHAR ucDockingPinBit;
+ UCHAR ucDockingPinPolarity;
+ ULONG ulDockingPinCFGInfo;
+ ULONG ulCPUCapInfo;
+ USHORT usNumberOfCyclesInPeriod;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ ULONG ulHTLinkFreq; //in 10Khz
+ USHORT usMinHTLinkWidth;
+ USHORT usMaxHTLinkWidth;
+ USHORT usUMASyncStartDelay;
+ USHORT usUMADataReturnTime;
+ USHORT usLinkStatusZeroTime;
+ USHORT usDACEfuse; //for storing the bandgap value (for RS880 only)
+ ULONG ulHighVoltageHTLinkFreq; // in 10Khz
+ ULONG ulLowVoltageHTLinkFreq; // in 10Khz
+ USHORT usMaxUpStreamHTLinkWidth;
+ USHORT usMaxDownStreamHTLinkWidth;
+ USHORT usMinUpStreamHTLinkWidth;
+ USHORT usMinDownStreamHTLinkWidth;
+ USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+ USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+ ULONG ulReserved3[96]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+/*
+ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+Bit[1]=1: system boots up in an AMD overdriven state or a user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state
+ =0: system boots up in driver control state. The power state depends on the PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+ =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system.
+ =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+ =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
+ =0: Voltage settings is determined by powerplay table.
+Bit[7]=1: Enable CLMC as hybrid mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
+ =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+ =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+ =0: DLL Shut Down feature is not enabled or supported on current system.
+
+ulBootUpReqDisplayVector: This dword is a bit vector that indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
+ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+ [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+ When a DDI connector is not "paired" (meaning two connections are mutually exclusive between chassis and docking, so only one of them can be connected at a time)
+ in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
+ if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+ [15:8] - Lane configuration attribute;
+ [23:16]- Connector type, possible value:
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+ CONNECTOR_OBJECT_ID_DISPLAYPORT
+ CONNECTOR_OBJECT_ID_eDP
+ [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type; set it to 0x0 when SidePort memory is not installed. The driver needs this info to change the SidePort memory clock. Not for display in CCC.
+For IGP, HyperMemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber: how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ucDockingPinBit: which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+ GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value; ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+ PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value; ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+ GPU SW doesn't control the mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE doesn't matter
+
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime: Memory access latency required for watermark calculation. Set this to 0x0 for a K8 CPU; set a proper value in units of 0.01us
+for Griffin or Greyhound. SBIOS needs to convert to actual time by:
+ if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+ if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+ if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+ if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq: HT link frequency for the power state with high voltage. If boot-up runs in HT1, this must be 0.
+ This must be less than or equal to ulHTLinkFreq (boot-up frequency).
+ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+ This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth: same as above.
+usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth: same as above.
+*/
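+
+// Illustrative sketch, not part of the original header: the T0Ttime-to-time
+// conversion described above, returning the latency in units of 0.01us as
+// usLinkStatusZeroTime expects. The helper name is hypothetical; the per-step
+// scales (0.1/0.5/2.0/20 us) come straight from the comment, and the
+// T0Ttime[5:4]=11b case assumes T0Ttime[3:0] <= 0xa as stated there.
+static inline USHORT example_t0time_to_0p01us(UCHAR t0t)
+{
+  static const USHORT scale_0p01us[4] = { 10, 50, 200, 2000 };
+  return (USHORT)((t0t & 0x0f) * scale_0p01us[(t0t >> 4) & 0x03]);
+}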
+
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
+#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
+
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
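+
+// Illustrative sketch, not part of the original header: pulling the chassis
+// lane map and the connector type out of a ulDDISlotXConfig dword with the
+// masks above. The helper names are hypothetical; the shift of 16 follows
+// the [23:16] connector-type field description.
+static inline UCHAR example_ddi_slot_lane_map(ULONG slot_cfg)
+{
+  return (UCHAR)(slot_cfg & b0IGP_DDI_SLOT_LANE_MAP_MASK);
+}
+
+static inline UCHAR example_ddi_slot_connector_type(ULONG slot_cfg)
+{
+  return (UCHAR)((slot_cfg & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
+}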
+
+// The new IntegratedSystemInfoTable revision after V2 is V5, because the real revision of V2 is v1.4. This revision is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit; related to NCLK in the NorthBridge
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulReserved1[8]; //must be 0x0 for the reserved
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulReserved2[4]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //TBD
+ ULONG ulCPUCapInfo; //TBD
+ USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usBootUpNBVoltage; //boot up NB voltage
+ UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+ UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+ ULONG ulReserved3[4]; //must be 0x0 for the reserved
+ ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
+ ULONG ulDDISlot2Config;
+ ULONG ulDDISlot3Config;
+ ULONG ulDDISlot4Config;
+ ULONG ulReserved4[4]; //must be 0x0 for the reserved
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ USHORT usReserved;
+ ULONG ulReserved5[4]; //must be 0x0 for the reserved
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+ ULONG ulReserved6[61]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;
+
+#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
+
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID 0x00
+#define ASIC_INT_TV_ENCODER_ID 0x02
+#define ASIC_INT_DIG1_ENCODER_ID 0x03
+#define ASIC_INT_DAC2_ENCODER_ID 0x04
+#define ASIC_EXT_TV_ENCODER_ID 0x06
+#define ASIC_INT_DVO_ENCODER_ID 0x07
+#define ASIC_INT_DIG2_ENCODER_ID 0x09
+#define ASIC_EXT_DIG_ENCODER_ID 0x05
+#define ASIC_EXT_DIG2_ENCODER_ID 0x08
+#define ASIC_INT_DIG3_ENCODER_ID 0x0a
+#define ASIC_INT_DIG4_ENCODER_ID 0x0b
+#define ASIC_INT_DIG5_ENCODER_ID 0x0c
+#define ASIC_INT_DIG6_ENCODER_ID 0x0d
+#define ASIC_INT_DIG7_ENCODER_ID 0x0e
+
+//define Encoder attribute
+#define ATOM_ANALOG_ENCODER 0
+#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DP_ENCODER 2
+
+#define ATOM_ENCODER_ENUM_MASK 0x70
+#define ATOM_ENCODER_ENUM_ID1 0x00
+#define ATOM_ENCODER_ENUM_ID2 0x10
+#define ATOM_ENCODER_ENUM_ID3 0x20
+#define ATOM_ENCODER_ENUM_ID4 0x30
+#define ATOM_ENCODER_ENUM_ID5 0x40
+#define ATOM_ENCODER_ENUM_ID6 0x50
+
+#define ATOM_DEVICE_CRT1_INDEX 0x00000000
+#define ATOM_DEVICE_LCD1_INDEX 0x00000001
+#define ATOM_DEVICE_TV1_INDEX 0x00000002
+#define ATOM_DEVICE_DFP1_INDEX 0x00000003
+#define ATOM_DEVICE_CRT2_INDEX 0x00000004
+#define ATOM_DEVICE_LCD2_INDEX 0x00000005
+#define ATOM_DEVICE_DFP6_INDEX 0x00000006
+#define ATOM_DEVICE_DFP2_INDEX 0x00000007
+#define ATOM_DEVICE_CV_INDEX 0x00000008
+#define ATOM_DEVICE_DFP3_INDEX 0x00000009
+#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+
+#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
+
+#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
+#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
+#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
+
+
+#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA 0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK 0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
+#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
+#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
+
+// usDeviceSupport:
+// Bit 0 = 0 - no CRT1 support= 1- CRT1 is supported
+// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
+// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
+// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
+// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
+// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
+// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
+// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
+// Bit 8 = 0 - no CV support= 1- CV is supported
+// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
+// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
+// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
+//
+//
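+// Illustrative sketch (not part of the original table definitions): testing a
+// usDeviceSupport word against the ATOM_DEVICE_xxx_SUPPORT masks defined above.
+// The helper name is hypothetical.
+static inline int atom_device_supported(USHORT usDeviceSupport, ULONG ulSupportMask)
+{
+  return (usDeviceSupport & ulSupportMask) != 0;  // e.g. ulSupportMask = ATOM_DEVICE_LCD1_SUPPORT
+}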
+
+/****************************************************************************/
+/* Structure used in MclkSS_InfoTable */
+/****************************************************************************/
+// ucI2C_ConfigID
+// [7:0] - I2C LINE Associate ID
+// = 0 - no I2C
+// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
+// = 0, [6:0]=SW assisted I2C ID
+// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+// = 2, HW engine for Multimedia use
+// = 3-7 Reserved for future I2C engines
+// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR bfHW_Capable:1;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfI2C_LineMux:4;
+#else
+ UCHAR bfI2C_LineMux:4;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+ ATOM_I2C_ID_CONFIG sbfAccess;
+ UCHAR ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
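+
+// Hedged example (added for illustration, not in the original header): decoding a
+// raw ucI2C_ConfigID byte through the access union, following the bit layout
+// documented above. Endianness is handled by the bitfield definitions themselves.
+// The helper names are hypothetical.
+static inline UCHAR atom_i2c_config_line_mux(UCHAR ucConfigId)
+{
+  ATOM_I2C_ID_CONFIG_ACCESS access;
+  access.ucAccess = ucConfigId;
+  return access.sbfAccess.bfI2C_LineMux;  // mux number (HW assisted) or GPIO ID (SW assisted)
+}
+
+static inline UCHAR atom_i2c_config_is_hw(UCHAR ucConfigId)
+{
+  ATOM_I2C_ID_CONFIG_ACCESS access;
+  access.ucAccess = ucConfigId;
+  return access.sbfAccess.bfHW_Capable;   // 1 = HW assisted I2C, 0 = SW assisted
+}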
+
+
+/****************************************************************************/
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+ USHORT usClkMaskRegisterIndex;
+ USHORT usClkEnRegisterIndex;
+ USHORT usClkY_RegisterIndex;
+ USHORT usClkA_RegisterIndex;
+ USHORT usDataMaskRegisterIndex;
+ USHORT usDataEnRegisterIndex;
+ USHORT usDataY_RegisterIndex;
+ USHORT usDataA_RegisterIndex;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+ UCHAR ucClkMaskShift;
+ UCHAR ucClkEnShift;
+ UCHAR ucClkY_Shift;
+ UCHAR ucClkA_Shift;
+ UCHAR ucDataMaskShift;
+ UCHAR ucDataEnShift;
+ UCHAR ucDataY_Shift;
+ UCHAR ucDataA_Shift;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+// Common Structure used in other structures
+/****************************************************************************/
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; it will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{
+#if ATOM_BIG_ENDIAN
+ USHORT Reserved:6;
+ USHORT RGB888:1;
+ USHORT DoubleClock:1;
+ USHORT Interlace:1;
+ USHORT CompositeSync:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT VerticalCutOff:1;
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HorizontalCutOff:1;
+#else
+ USHORT HorizontalCutOff:1;
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VerticalCutOff:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT CompositeSync:1;
+ USHORT Interlace:1;
+ USHORT DoubleClock:1;
+ USHORT RGB888:1;
+ USHORT Reserved:6;
+#endif
+}ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ ATOM_MODE_MISC_INFO sbfAccess;
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
+#else
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
+#endif
+
+// usModeMiscInfo-
+#define ATOM_H_CUTOFF 0x01
+#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
+#define ATOM_V_CUTOFF 0x08
+#define ATOM_H_REPLICATIONBY2 0x10
+#define ATOM_V_REPLICATIONBY2 0x20
+#define ATOM_COMPOSITESYNC 0x40
+#define ATOM_INTERLACE 0x80
+#define ATOM_DOUBLE_CLOCK_MODE 0x100
+#define ATOM_RGB888_MODE 0x200
+
+//usRefreshRate-
+#define ATOM_REFRESH_43 43
+#define ATOM_REFRESH_47 47
+#define ATOM_REFRESH_56 56
+#define ATOM_REFRESH_60 60
+#define ATOM_REFRESH_65 65
+#define ATOM_REFRESH_70 70
+#define ATOM_REFRESH_72 72
+#define ATOM_REFRESH_75 75
+#define ATOM_REFRESH_85 85
+
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
+//
+// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
+// = EDID_HA + EDID_HBL
+// VESA_HDISP = VESA_ACTIVE = EDID_HA
+// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+// = EDID_HA + EDID_HSO
+// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+// VESA_BORDER = EDID_BORDER
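+
+// Hedged sketch (not in the original header): the horizontal half of the
+// EDID-to-VESA translation above, written out in C. The usEdid* parameters are
+// stand-ins for values parsed from an EDID detailed timing descriptor.
+static inline void atom_edid_to_vesa_horizontal(USHORT usEdidHA, USHORT usEdidHBL,
+                                                USHORT usEdidHSO, USHORT usEdidHSPW,
+                                                USHORT *pusHTotal, USHORT *pusHSyncStart,
+                                                USHORT *pusHSyncWidth)
+{
+  *pusHTotal     = usEdidHA + usEdidHBL;  // VESA_HTOTAL      = EDID_HA + EDID_HBL
+  *pusHSyncStart = usEdidHA + usEdidHSO;  // VESA_HSYNC_START = EDID_HA + EDID_HSO
+  *pusHSyncWidth = usEdidHSPW;            // VESA_HSYNC_WIDTH = EDID_HSPW
+}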
+
+/****************************************************************************/
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+ USHORT usH_Size;
+ USHORT usH_Blanking_Time;
+ USHORT usV_Size;
+ USHORT usV_Blanking_Time;
+ USHORT usH_SyncOffset;
+ USHORT usH_SyncWidth;
+ USHORT usV_SyncOffset;
+ USHORT usV_SyncWidth;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucH_Border; // From DFP EDID
+ UCHAR ucV_Border;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+ USHORT usH_Total; // horizontal total
+ USHORT usH_Disp; // horizontal display
+ USHORT usH_SyncStart; // horizontal Sync start
+ USHORT usH_SyncWidth; // horizontal Sync width
+ USHORT usV_Total; // vertical total
+ USHORT usV_Disp; // vertical display
+ USHORT usV_SyncStart; // vertical Sync start
+ USHORT usV_SyncWidth; // vertical Sync width
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucOverscanRight; // right
+ UCHAR ucOverscanLeft; // left
+ UCHAR ucOverscanBottom; // bottom
+ UCHAR ucOverscanTop; // top
+ UCHAR ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/
+// Structure used in StandardVESA_TimingTable
+// AnalogTV_InfoTable
+// ComponentVideoInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING
+{
+ USHORT usCRTC_H_Total;
+ USHORT usCRTC_H_Disp;
+ USHORT usCRTC_H_SyncStart;
+ USHORT usCRTC_H_SyncWidth;
+ USHORT usCRTC_V_Total;
+ USHORT usCRTC_V_Disp;
+ USHORT usCRTC_V_SyncStart;
+ USHORT usCRTC_V_SyncWidth;
+ USHORT usPixelClock; //in 10Khz unit
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ USHORT usCRTC_OverscanRight;
+ USHORT usCRTC_OverscanLeft;
+ USHORT usCRTC_OverscanBottom;
+ USHORT usCRTC_OverscanTop;
+ USHORT usReserve;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+ USHORT usPixClk;
+ USHORT usHActive;
+ USHORT usHBlanking_Time;
+ USHORT usVActive;
+ USHORT usVBlanking_Time;
+ USHORT usHSyncOffset;
+ USHORT usHSyncWidth;
+ USHORT usVSyncOffset;
+ USHORT usVSyncWidth;
+ USHORT usImageHSize;
+ USHORT usImageVSize;
+ UCHAR ucHBorder;
+ UCHAR ucVBorder;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+// Structure used in LVDS_InfoTable
+// * Need a document to describe this table
+/****************************************************************************/
+#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usModePatchTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap;
+ UCHAR ucPanelInfoSize; // starts from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
+ UCHAR ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_READ_EDID 0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
+#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP 0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
+#define PANEL_RANDOM_DITHER 0x80
+#define PANEL_RANDOM_DITHER_MASK 0x80
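+
+// Hedged example (illustrative only): recovering bits-per-color from the EDID
+// V1.4 style code in bits [6:4] (see the table above). Codes 1..6 map to
+// 6/8/10/12/14/16 bpc, i.e. bpc = 4 + 2*code; 0 means undefined and 7 is reserved.
+static inline UCHAR atom_panel_bits_per_color(UCHAR ucMisc)
+{
+  UCHAR ucCode = (ucMisc & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;
+  return (ucCode >= 1 && ucCode <= 6) ? (UCHAR)(4 + 2 * ucCode) : 0;
+}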
+
+#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
+
+/****************************************************************************/
+// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+typedef struct _ATOM_LCD_INFO_V13
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ ULONG ulReserved0;
+ UCHAR ucLCD_Misc; // Reorganized in V13
+ // Bit0: {=0:single, =1:dual},
+ // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
+ // Bit3:2: {Grey level}
+ // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
+ // Bit7 Reserved; was for ATOM_PANEL_MISC_API_ENABLED, still needed?
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
+ // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+ // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+ // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+ // Bit7-3: Reserved
+ UCHAR ucPanelInfoSize; // starts from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
+ USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+ UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
+ UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
+ UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
+ UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
+
+ USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
+ UCHAR uceDPToLVDSRxId;
+ UCHAR ucLcdReservd;
+ ULONG ulReserved[2];
+}ATOM_LCD_INFO_V13;
+
+#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP, no change compared to the previous version
+
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
+#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID 0x02 // RT translator which requires AMD SW init
+
+typedef struct _ATOM_PATCH_RECORD_MODE
+{
+ UCHAR ucRecordType;
+ USHORT usHDisp;
+ USHORT usVDisp;
+}ATOM_PATCH_RECORD_MODE;
+
+typedef struct _ATOM_LCD_RTS_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
+
+//!! If the record below exists, it should always be the first record, for easy use in command tables!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP
+{
+ UCHAR ucRecordType;
+ USHORT usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF 1
+#define LCD_MODE_CAP_CRTC_OFF 2
+#define LCD_MODE_CAP_PANEL_OFF 4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucFakeEDIDLength;
+ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEDIDLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ USHORT usHSize;
+ USHORT usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
+#define LCD_RTS_RECORD_TYPE 2
+#define LCD_CAP_RECORD_TYPE 3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
+#define ATOM_RECORD_END_TYPE 0xFF
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0: Down Spread, =1: Center Spread. Bit1=1: Ext., =0: Int. Bit2=1: PCIE REFCLK SS, =0: internal PPLL SS. Others: TBD
+ UCHAR ucSS_Step;
+ UCHAR ucSS_Delay;
+ UCHAR ucSS_Id;
+ UCHAR ucRecommendedRef_Div;
+ UCHAR ucSS_Range; //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY 16
+#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7GHz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for the internal DP stream at 1.62GHz as well.
+#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62GHz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7GHz
+#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62GHz
+
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+#define ATOM_INTERNAL_SS_MASK 0x00000000
+#define ATOM_EXTERNAL_SS_MASK 0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT 2
+#define EXEC_SS_DELAY_SHIFT 4
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC 1
+//ATOM_TV_NTSCJ 2
+//ATOM_TV_PAL 3
+//ATOM_TV_PALM 4
+//ATOM_TV_PALCN 5
+//ATOM_TV_PALN 6
+//ATOM_TV_PAL60 7
+//ATOM_TV_SECAM 8
+
+//ucTVSupportedStd definition:
+#define NTSC_SUPPORT 0x1
+#define NTSCJ_SUPPORT 0x2
+
+#define PAL_SUPPORT 0x4
+#define PALM_SUPPORT 0x8
+#define PALCN_SUPPORT 0x10
+#define PALN_SUPPORT 0x20
+#define PAL60_SUPPORT 0x40
+#define SECAM_SUPPORT 0x80
+
+#define MAX_SUPPORTED_TV_TIMING 2
+
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
+
+#define MAX_SUPPORTED_TV_TIMING_V1_2 3
+
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+ UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
+ UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+ UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
+ UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK 0x1F
+
+/**************************************************************************/
+// VRAM usage and its definitions
+
+// One chunk of VRAM used by the BIOS is for HWICON surfaces, EDID data,
+// current mode timing and detailed timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: the physical address of this memory portion = mmFB_START (4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR
+// To BIOS: ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16MB (Max. VESA memory is 16MB!)
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
+#define ATOM_HWICON_INFOTABLE_SIZE 32
+#define MAX_DTD_MODE_IN_VRAM 6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28 = sizeof(ATOM_DTD_FORMAT)
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) //32 is a predefined number, 8 = sizeof(ATOM_STD_FORMAT)
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
+#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
+
+#define ATOM_HWICON1_SURFACE_ADDR 0
+#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
+#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
+
+//The size below is in KB!
+#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
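+
+// Hedged sketch (not part of the original header): computing the driver-visible
+// physical address of a BIOS-reserved block per the note at the top of this
+// section. ulFBStart (mmFB_START, 4K aligned) and ulUsageStart
+// (ATOMBIOS_VRAM_USAGE_START_ADDR) are passed in as parameters, since the
+// latter is defined outside this header.
+static inline ULONG atom_vram_block_phys_addr(ULONG ulFBStart, ULONG ulUsageStart, ULONG ulAtomAddr)
+{
+  return ulFBStart + ulUsageStart + ulAtomAddr;  // e.g. ulAtomAddr = ATOM_CRT1_EDID_ADDR
+}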
+
+#define ATOM_VRAM_RESERVE_V2_SIZE 32
+
+#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
+#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
+#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
+
+/***********************************************************************************/
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+// at run time.
+// Note2: From RV770, the memory is more than 32bit addressable, so we will change
+// ucTableFormatRevision=1, ucTableContentRevision=4; the structure remains
+// exactly the same as 1.1 and 1.2 (1.3 was never in use), but ulStartAddrUsedByFirmware
+// (an offset from the start of memory) is KB aligned instead of byte aligned.
+/***********************************************************************************/
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have
+(a C sketch of this computation follows the V1_5 structures below):
+
+If (ulStartAddrUsedByFirmware!=0)
+FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
+else //Non VGA case
+ if (FB_Size<=2Gb)
+ FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
+ else
+ FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
+
+/***********************************************************************************/
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+// Version changed to 1.5 to allow the driver to allocate the VRAM area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
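+
+// Hedged sketch (added for illustration): the CAIL FBAccessAreaOffset
+// computation from the comment block above, written out in C. The FB size and
+// aperture size are stand-in parameters in KB, matching the KB units of
+// usFBUsedByDrvInKb; 2GB expressed in KB is 2UL<<20.
+static inline ULONG cail_fb_access_area_offset(const ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 *pInfo,
+                                               ULONG ulFBSizeInKb, ULONG ulAperSizeInKb)
+{
+  if (pInfo->ulStartAddrUsedByFirmware != 0)      // VGA case: VBIOS already claimed the area
+    return pInfo->ulStartAddrUsedByFirmware - pInfo->usFBUsedByDrvInKb;
+  else if (ulFBSizeInKb <= (2UL << 20))           // non-VGA case, FB_Size <= 2GB
+    return ulFBSizeInKb - pInfo->usFBUsedByDrvInKb;
+  else
+    return ulAperSizeInKb - pInfo->usFBUsedByDrvInKb;
+}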
+
+/****************************************************************************/
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+ USHORT usGpioPin_AIndex;
+ UCHAR ucGpioPinBitShift;
+ UCHAR ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
+
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+// Structure used in ComponentVideoInfoTable
+/****************************************************************************/
+#define GPIO_PIN_ACTIVE_HIGH 0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS 5
+
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
+
+typedef struct _ATOM_GPIO_INFO
+{
+ USHORT usAOffset;
+ UCHAR ucSettings;
+ UCHAR ucReserved;
+}ATOM_GPIO_INFO;
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 outputs 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
+
+//Line 3 outputs 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+//Line 3 outputs 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
+
+//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMask_PinRegisterIndex;
+ USHORT usEN_PinRegisterIndex;
+ USHORT usY_PinRegisterIndex;
+ USHORT usA_PinRegisterIndex;
+ UCHAR ucBitShift;
+ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
+ ATOM_DTD_FORMAT sReserved; // must be zeroed out
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucReserved[3];
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucReserved;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/
+// Structure used in object_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+ USHORT usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[1]; //from the 1st Encoder Obj sourced from the GPU to the last Graphic Obj destined for the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+ UCHAR ucNumOfDispPath;
+ UCHAR ucVersion;
+ UCHAR ucPadding[2];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT //each object has this structure
+{
+ USHORT usObjectID;
+ USHORT usSrcDstTableOffset;
+ USHORT usRecordOffset; //this points to a set of records defined below
+ USHORT usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE //Each of the 4 object table offsets above points to a table of objects, all of which have this structure
+{
+ UCHAR ucNumberOfObjects;
+ UCHAR ucPadding[3];
+ ATOM_OBJECT asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset points to this structure
+{
+ UCHAR ucNumberOfSrc;
+ USHORT usSrcObjectID[1];
+ UCHAR ucNumberOfDst;
+ USHORT usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0 0
+#define EXT_HPDPIN_LUTINDEX_1 1
+#define EXT_HPDPIN_LUTINDEX_2 2
+#define EXT_HPDPIN_LUTINDEX_3 3
+#define EXT_HPDPIN_LUTINDEX_4 4
+#define EXT_HPDPIN_LUTINDEX_5 5
+#define EXT_HPDPIN_LUTINDEX_6 6
+#define EXT_HPDPIN_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0 0
+#define EXT_AUXDDC_LUTINDEX_1 1
+#define EXT_AUXDDC_LUTINDEX_2 2
+#define EXT_AUXDDC_LUTINDEX_3 3
+#define EXT_AUXDDC_LUTINDEX_4 4
+#define EXT_AUXDDC_LUTINDEX_5 5
+#define EXT_AUXDDC_LUTINDEX_6 6
+#define EXT_AUXDDC_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+
+//ucChannelMapping is defined as follows
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDP_Lane3_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane0_Source:2;
+#else
+ UCHAR ucDP_Lane0_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
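+
+// Hedged example (illustrative only): extracting the GPU TX source for one DP
+// lane directly from the raw ucChannelMapping byte, matching the bit layout
+// described above. The helper name is hypothetical.
+static inline UCHAR atom_dp_lane_source(UCHAR ucChannelMapping, UCHAR ucLane /* 0..3 */)
+{
+  return (ucChannelMapping >> (ucLane * 2)) & 0x3;  // 0..3 => GPU pin TX0..TX3
+}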
+
+//for DVI/HDMI, in the dual link case both links have to have the same mapping.
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDVI_CLK_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA2_Source:2;
+#else
+ UCHAR ucDVI_DATA2_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
+typedef struct _EXT_DISPLAY_PATH
+{
+ USHORT usDeviceTag; //A bit vector to show what devices are supported
+ USHORT usDeviceACPIEnum; //16bit device ACPI id.
+ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
+ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
+ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
+ USHORT usExtEncoderObjId; //external encoder object id
+ union{
+ UCHAR ucChannelMapping; // if ucChannelMapping=0, use the default one-to-one mapping
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+ UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
+ USHORT usCaps;
+ USHORT usReserved;
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
+//usCaps
+#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // a fixed total of 7 entries.
+ UCHAR ucChecksum; // a simple checksum: the byte sum of the whole structure equals 0x0.
+ UCHAR uc3DStereoPinId; // use for eDP panel
+ UCHAR ucRemoteDisplayConfig;
+ UCHAR uceDPToLVDSRxId;
+ UCHAR Reserved[4]; // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
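+
+// Hedged sketch (not in the original header): verifying ucChecksum per the note
+// above: the byte sum of the whole structure should equal 0x0.
+static inline int atom_ext_display_info_checksum_ok(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *pInfo)
+{
+  const UCHAR *pByte = (const UCHAR *)pInfo;
+  UCHAR ucSum = 0;
+  USHORT i;
+  for (i = 0; i < sizeof(*pInfo); i++)
+    ucSum += pByte[i];
+  return ucSum == 0;  // 0 means the table checksums correctly
+}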
+
+//Related definitions; all records are different but they have a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+ UCHAR ucRecordType; //An enum to indicate the record type
+ UCHAR ucRecordSize; //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE 1
+#define ATOM_HPD_INT_RECORD_TYPE 2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
+#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
+#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+#define ATOM_ENCODER_CAP_RECORD_TYPE 20
+
+
+//Must be updated when a new record type is added; must equal the last record type defined!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
+
+typedef struct _ATOM_I2C_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+ ULONG ulACPIDeviceEnum; //Reserved for now
+ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+ USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucTVActiveState; //Indicates whether the pin is 0 or 1 when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; //Set to 1 when it's active high
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; //Set to 1 when it's active high
+ UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will gradually replace the JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
+ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucFlags; // Future expandability
+ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real GPIO pin pairs; the actual count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
+#define GPIO_PIN_TYPE_INPUT 0x00
+#define GPIO_PIN_TYPE_OUTPUT 0x10
+#define GPIO_PIN_TYPE_HW_CONTROL 0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
+#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
+#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
+
+// Indexes to GPIO array in GLSync record
+// GLSync record is for Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; // DVOA strength control for CF
+ UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+
+typedef struct _ATOM_ENCODER_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ union {
+ USHORT usEncoderCap;
+ struct {
+#if ATOM_BIG_ENDIAN
+ USHORT usReserved:14; // Bit2-15 may be defined for other capabilities in the future
+ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+#else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usReserved:14; // Bit2-15 may be defined for other capabilities in the future
+#endif
+ };
+ };
+}ATOM_ENCODER_CAP_RECORD;
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
+
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+ UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //A fixed size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //A fixed size array which maps external pins to the internal DDC ID
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usObjectID; //could be connector, encoder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/
+// ASIC voltage data table
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+ USHORT usVDDCBaseLevel; //In units of 50mV
+ USHORT usReserved; //For possible extension table offset
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; //Step size of one voltage step, in 0.5mV units
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; //64 is for allocation; the actual size is ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+ USHORT usVoltageBaseLevel; // In units of 1mV
+ USHORT usVoltageStep; // Step size of one voltage step, in 1mV units
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entries, which indicates the max Voltage
+ UCHAR ucFlag; // bit0=0: step is 1mV, =1: 0.5mV
+ UCHAR ucBaseVID; // if there is no lookup table, VID = BaseVID + (Vol - BaseLevel)/VoltageStep
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation; the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
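+
+// Hedged example (not in the original header): the no-lookup-table VID
+// conversion from ucBaseVID's comment, VID = BaseVID + (Vol - BaseLevel)/VoltageStep.
+// All values are in mV; assumes usVoltageStep != 0, and the 0.5mV step case
+// flagged by ucFlag bit0 is ignored here for brevity.
+static inline UCHAR atom_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *pFormula, USHORT usVolInMv)
+{
+  return (UCHAR)(pFormula->ucBaseVID +
+                 (usVolInMv - pFormula->usVoltageBaseLevel) / pFormula->usVoltageStep);
+}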
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucReserved[3];
+ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+ UCHAR ucVoltageControlId; //Indicates whether it is controlled by I2C, GPIO or the HW state machine
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; //GPIO_PAD register index
+ UCHAR ucGpioPinBitShift[9]; //at most 8 pins supporting 255 VIDs; terminate with 0xff
+ UCHAR ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define VOLTAGE_CONTROLLED_BY_HW 0x00
+#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
+#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
+#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402 0x04
+#define VOLTAGE_CONTROL_ID_UP6266 0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
+#define VOLTAGE_CONTROL_ID_VT1556M 0x07
+#define VOLTAGE_CONTROL_ID_CHL822x 0x08
+#define VOLTAGE_CONTROL_ID_VT1586M 0x09
+#define VOLTAGE_CONTROL_ID_UP1637 0x0A
+
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA asFormula; //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA_V2 asFormula; //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
+ USHORT usSize; //Size of Object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+typedef struct _VOLTAGE_LUT_ENTRY_V2
+{
+ ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+ USHORT usVoltageLevel; // The Voltage ID which is used to program GPIO register
+ USHORT usVoltageId;
+ USHORT usLeakageId; // The corresponding Voltage Value, in mV
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+ UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ ULONG ulReserved;
+ VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+ UCHAR ucVoltageGpioCntlId; // default is 0, which indicates control through CG VID mode
+ UCHAR ucGpioEntryNum; // indicates the number of entries in the Voltage/GPIO value look-up table
+ UCHAR ucPhaseDelay; // phase delay in units of microseconds
+ UCHAR ucReserved;
+ ULONG ulGpioMaskVal; // GPIO Mask value
+ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
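+
+// Illustrative sketch (not part of the original header): scanning asVolGpioLut
+// for a requested voltage, assuming the kernel's le16_to_cpu/le32_to_cpu
+// byte-order helpers; variable names are hypothetical.
+//
+// for (i = 0; i < gpio_obj->ucGpioEntryNum; i++) {
+//     VOLTAGE_LUT_ENTRY_V2 *e = &gpio_obj->asVolGpioLut[i];
+//     if (le16_to_cpu(e->usVoltageValue) == requested_mv) {
+//         /* program the GPIO pins with (le32_to_cpu(e->ulVoltageId) &
+//            le32_to_cpu(gpio_obj->ulGpioMaskVal)) */
+//         break;
+//     }
+// }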
+
+typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+ UCHAR ucLeakageCntlId; // default is 0
+ UCHAR ucLeakageEntryNum; // indicates the number of entries in the LeakageId/Voltage LUT
+ UCHAR ucReserved[2];
+ ULONG ulMaxVoltageLevel;
+ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+ ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+ ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+ ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
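+
+// Illustrative sketch (not part of the original header): the V3 voltage
+// objects are variable-sized, so a driver typically walks them by usSize
+// rather than indexing asVoltageObj[] directly. Assumes the usStructureSize
+// field of ATOM_COMMON_TABLE_HEADER and the kernel's le16_to_cpu helper;
+// names are hypothetical.
+//
+// ATOM_VOLTAGE_OBJECT_HEADER_V3 *hdr = &info->asVoltageObj[0].asGpioVoltageObj.sHeader;
+// UCHAR *end = (UCHAR *)info + le16_to_cpu(info->sHeader.usStructureSize);
+// while ((UCHAR *)hdr < end) {
+//     if (hdr->ucVoltageType == type && hdr->ucVoltageMode == mode)
+//         return (ATOM_VOLTAGE_OBJECT_V3 *)hdr;
+//     hdr = (ATOM_VOLTAGE_OBJECT_HEADER_V3 *)((UCHAR *)hdr + le16_to_cpu(hdr->usSize));
+// }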
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; //from LSB to MSB, max 8 bits each; terminated with 0xffff if fewer than 8 efuse IDs are used
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakage ID and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
+#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
+
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+ UCHAR ucPwrSrcId; // Power source
+ UCHAR ucPwrSensorType; // GPIO, I2C or none
+ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
+ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
+ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect
+ UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect
+ UCHAR ucPwrSensActiveState; // high active or low active
+ UCHAR ucReserve[3]; // reserved
+ USHORT usSensPwr; // in unit of watt
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1 0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
+
+//define ucPwrSensorId
+#define POWER_SENSOR_ALWAYS 0x00
+#define POWER_SENSOR_GPIO 0x01
+#define POWER_SENSOR_I2C 0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+ ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
+ ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+ ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+ USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
+ USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
+
+// this IntegrateSystemInfoTable is used for Llano/Ontario APU
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulSB_MMIO_Base_Addr;
+ USHORT usRequestedPWMFreqInHz;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucHtcHystLmt;
+ ULONG ulMinEngineClock;
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usNBP0Voltage;
+ USHORT usNBP1Voltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ USHORT usPanelRefreshRateRange;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10];
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
+ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+ ULONG ulGMCRestoreResetTime;
+ ULONG ulMinimumNClk;
+ ULONG ulIdleNClk;
+ ULONG ulDDR_DLL_PowerUpTime;
+ ULONG ulDDR_PLL_PowerUpTime;
+ USHORT usPCIEClkSSPercentage;
+ USHORT usPCIEClkSSType;
+ USHORT usLvdsSSPercentage;
+ USHORT usLvdsSSpreadRateIn10Hz;
+ USHORT usHDMISSPercentage;
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+ ULONG SclkDpmBoostMargin;
+ ULONG SclkDpmThrottleMargin;
+ USHORT SclkDpmTdpLimitPG;
+ USHORT SclkDpmTdpLimitBoost;
+ ULONG ulBoostEngineCLock;
+ UCHAR ulBoostVid_2bit;
+ UCHAR EnableBoost;
+ USHORT GnbTdpLimit;
+ USHORT usMaxLVDSPclkFreqInSingleLink;
+ UCHAR ucLvdsMisc;
+ UCHAR ucLVDSReserved;
+ ULONG ulReserved3[15];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+//ucLVDSMisc:
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
+#define SYS_INFO_LVDSMISC__888_BPC 0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
+
+/**********************************************************************************************************************
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10kHz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Llano/Ontario projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_CRT2_SUPPORT 0x0010
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: Other display related flags, not defined yet.
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
+ =1: TMDS/HDMI Coherent Mode use single PLL mode.
+ bit[3]=0: Enable HW AUX mode detection logic
+ =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+ 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use.
+ VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+ changing BL using the VBIOS function is functional in both driver and non-driver present environments,
+ and enabling VariBri under the driver environment from the PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves as a flag only, indicating
+ that BL control from the GPU is expected.
+ VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+ Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+ it's per platform,
+ and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+ To calculate threshold off value to exit HTC_active state, which is the threshold on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit of 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink: Max pixel clock of an LVDS panel in single link; =0 means VBIOS uses the default threshold, currently 85MHz
+ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
+ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+**********************************************************************************************************************/
+
+// this Table is used for Llano/Ontario APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
+ ULONG ulPowerplayTable[128];
+}ATOM_FUSION_SYSTEM_INFO_V1;
+/**********************************************************************************************************************
+ ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]: These 512 bytes of memory are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
+**********************************************************************************************************************/
+
+// this IntegrateSystemInfoTable is used for Trinity APU
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulSB_MMIO_Base_Addr;
+ USHORT usRequestedPWMFreqInHz;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucHtcHystLmt;
+ ULONG ulMinEngineClock;
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usNBP0Voltage;
+ USHORT usNBP1Voltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ USHORT usPanelRefreshRateRange;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ UCHAR strVBIOSMsg[40];
+ ULONG ulReserved[20];
+ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+ ULONG ulGMCRestoreResetTime;
+ ULONG ulMinimumNClk;
+ ULONG ulIdleNClk;
+ ULONG ulDDR_DLL_PowerUpTime;
+ ULONG ulDDR_PLL_PowerUpTime;
+ USHORT usPCIEClkSSPercentage;
+ USHORT usPCIEClkSSType;
+ USHORT usLvdsSSPercentage;
+ USHORT usLvdsSSpreadRateIn10Hz;
+ USHORT usHDMISSPercentage;
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+ ULONG SclkDpmBoostMargin;
+ ULONG SclkDpmThrottleMargin;
+ USHORT SclkDpmTdpLimitPG;
+ USHORT SclkDpmTdpLimitBoost;
+ ULONG ulBoostEngineCLock;
+ UCHAR ulBoostVid_2bit;
+ UCHAR EnableBoost;
+ USHORT GnbTdpLimit;
+ USHORT usMaxLVDSPclkFreqInSingleLink;
+ UCHAR ucLvdsMisc;
+ UCHAR ucLVDSReserved;
+ UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ UCHAR ucLVDSOffToOnDelay_in4Ms;
+ UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ UCHAR ucLVDSReserved1;
+ ULONG ulLCDBitDepthControlVal;
+ ULONG ulNbpStateMemclkFreq[4];
+ USHORT usNBP2Voltage;
+ USHORT usNBP3Voltage;
+ ULONG ulNbpStateNClkFreq[4];
+ UCHAR ucNBDPMEnable;
+ UCHAR ucReserved[3];
+ UCHAR ucDPMState0VclkFid;
+ UCHAR ucDPMState0DclkFid;
+ UCHAR ucDPMState1VclkFid;
+ UCHAR ucDPMState1DclkFid;
+ UCHAR ucDPMState2VclkFid;
+ UCHAR ucDPMState2DclkFid;
+ UCHAR ucDPMState3VclkFid;
+ UCHAR ucDPMState3DclkFid;
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
+
+/**********************************************************************************************************************
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10kHz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+ =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+ bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
+ =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
+ bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
+ =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
+ bit[3]=0: VBIOS fast boot is disabled
+ =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if an LCD panel is connected and the lid is open )
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
+ =1: TMDS/HDMI Coherent Mode use single PLL mode.
+ bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
+ =1: DP mode use single PLL mode
+ bit[3]=0: Enable AUX HW mode detection logic
+ =1: Disable AUX HW mode detection logic
+
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+ 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use.
+ VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+ changing BL using the VBIOS function is functional in both driver and non-driver present environments,
+ and enabling VariBri under the driver environment from the PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves as a flag only, indicating
+ that BL control from the GPU is expected.
+ VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+ Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+ it's per platform,
+ and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+ To calculate threshold off value to exit HTC_active state, which is the threshold on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usNBP2Voltage: VID for voltage on NB P2 State
+usNBP3Voltage: VID for voltage on NB P3 State
+usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit of 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink: Max pixel clock of an LVDS panel in single link; =0 means VBIOS uses the default threshold, currently 85MHz
+ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
+ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
+ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
+ =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
+ =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL ) off to data enable ( DE ) signal off.
+ =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
+ =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms: LVDS power sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
+ =0 means use the VBIOS default delay, which is 125 ( 500ms ).
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+ =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
+ =0 means use the VBIOS default delay, which is 0 ( 0ms ).
+ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10kHz at each NB P-state.
+
+**********************************************************************************************************************/
+
+/**************************************************************************/
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
+#define ICS91719 1
+#define ICS91720 2
+
+//Define one structure to inform SW of a "block of data" to write to the external SS chip via the I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, in addition to "Start" and "Stop"
+ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data to write to the external SS chip via the I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
+ UCHAR ucSSChipID; //SS chip being used
+ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
+ UCHAR ucNumOfI2CDataRecords; //number of data blocks
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
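+
+// Illustrative sketch (not part of the original header): the data records are
+// variable-length, so each one occupies 1 + ucNunberOfBytes (sic) bytes and
+// the next record starts immediately after it. Names are hypothetical.
+//
+// ATOM_I2C_DATA_RECORD *rec = setup->asI2CData;
+// for (i = 0; i < setup->ucNumOfI2CDataRecords; i++) {
+//     /* issue Start, write rec->ucI2CData[0 .. rec->ucNunberOfBytes - 1]
+//        to setup->ucSSChipSlaveAddr, then issue Stop */
+//     rec = (ATOM_I2C_DATA_RECORD *)((UCHAR *)rec + 1 + rec->ucNunberOfBytes);
+// }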
+
+//==========================================================================================
+typedef struct _ATOM_ASIC_MVDD_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
+#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
+
+//==========================================================================================
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+ ULONG ulTargetClockRange; //Clock out frequency (VCO), in unit of 10kHz
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread.
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS 1
+#define ASIC_INTERNAL_ENGINE_SS 2
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
+#define ASIC_INTERNAL_VCE_SS 10
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, clock out frequency (VCO), in unit of 10kHz
+ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+//#define ATOM_INTERNAL_SS_MASK 0x00000000
+//#define ATOM_EXTERNAL_SS_MASK 0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
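+
+// Illustrative sketch (not part of the original header): asSpreadSpectrum[] is
+// a placeholder, so the real entry count is derived from the table size.
+// Assumes the usStructureSize field of ATOM_COMMON_TABLE_HEADER and the
+// kernel's le16_to_cpu helper; names are hypothetical.
+//
+// int num = (le16_to_cpu(info->sHeader.usStructureSize) -
+//            sizeof(ATOM_COMMON_TABLE_HEADER)) /
+//           sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+// for (i = 0; i < num; i++)
+//     if (info->asSpreadSpectrum[i].ucClockIndication == ASIC_INTERNAL_ENGINE_SS)
+//         /* found the engine-clock SS entry */;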
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, clock out frequency (VCO), in unit of 10kHz
+ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
+#define ATOM_DEVICE_CONNECT_INFO_DEF 0
+#define ATOM_ROM_LOCATION_DEF 1
+#define ATOM_TV_STANDARD_DEF 2
+#define ATOM_ACTIVE_INFO_DEF 3
+#define ATOM_LCD_INFO_DEF 4
+#define ATOM_DOS_REQ_INFO_DEF 5
+#define ATOM_ACC_CHANGE_INFO_DEF 6
+#define ATOM_DOS_MODE_INFO_DEF 7
+#define ATOM_I2C_CHANNEL_STATUS_DEF 8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
+#define ATOM_INTERNAL_TIMER_DEF 10
+
+// BIOS_0_SCRATCH Definition
+#define ATOM_S0_CRT1_MONO 0x00000001L
+#define ATOM_S0_CRT1_COLOR 0x00000002L
+#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A 0x00000008L
+#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A 0x00000010L
+#define ATOM_S0_CV_DIN_A 0x00000020L
+#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+
+#define ATOM_S0_CRT2_MONO 0x00000100L
+#define ATOM_S0_CRT2_COLOR 0x00000200L
+#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE 0x00000400L
+#define ATOM_S0_TV1_SVIDEO 0x00000800L
+#define ATOM_S0_TV1_SCART 0x00004000L
+#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV 0x00001000L
+#define ATOM_S0_CV_DIN 0x00002000L
+#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1 0x00010000L
+#define ATOM_S0_DFP2 0x00020000L
+#define ATOM_S0_LCD1 0x00040000L
+#define ATOM_S0_LCD2 0x00080000L
+#define ATOM_S0_DFP6 0x00100000L
+#define ATOM_S0_DFP3 0x00200000L
+#define ATOM_S0_DFP4 0x00400000L
+#define ATOM_S0_DFP5 0x00800000L
+
+#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6
+
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
+ // the FAD/HDP reg access bug. Bit is read by DAL, this is obsolete from RV5xx
+
+#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT 26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
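+
+// Illustrative sketch (not part of the original header): decoding the thermal
+// and system power state fields out of BIOS_0_SCRATCH with the mask/shift
+// pairs above. The scratch-register read helper is hypothetical.
+//
+// ULONG s0 = read_bios_0_scratch();
+// UCHAR therm = (s0 & ATOM_S0_THERMAL_STATE_MASK) >> ATOM_S0_THERMAL_STATE_SHIFT;
+// UCHAR power = (s0 & ATOM_S0_SYSTEM_POWER_STATE_MASK) >> ATOM_S0_SYSTEM_POWER_STATE_SHIFT;
+// if (power == ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC)
+//     /* running on battery */;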
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S0_CRT1_MONOb0 0x01
+#define ATOM_S0_CRT1_COLORb0 0x02
+#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0 0x04
+#define ATOM_S0_TV1_SVIDEOb0 0x08
+#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0 0x10
+#define ATOM_S0_CV_DINb0 0x20
+#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1 0x01
+#define ATOM_S0_CRT2_COLORb1 0x02
+#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1 0x04
+#define ATOM_S0_TV1_SVIDEOb1 0x08
+#define ATOM_S0_TV1_SCARTb1 0x40
+#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1 0x10
+#define ATOM_S0_CV_DINb1 0x20
+#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2 0x01
+#define ATOM_S0_DFP2b2 0x02
+#define ATOM_S0_LCD1b2 0x04
+#define ATOM_S0_LCD2b2 0x08
+#define ATOM_S0_DFP6b2 0x10
+#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP4b2 0x40
+#define ATOM_S0_DFP5b2 0x80
+
+
+#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT 18
+
+// BIOS_1_SCRATCH Definition
+#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
+
+// BIOS_2_SCRATCH Definition
+#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
+
+#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
+#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
+#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
+
+
+// BIOS_3_SCRATCH Definition
+#define ATOM_S3_CRT1_ACTIVE 0x00000001L
+#define ATOM_S3_LCD1_ACTIVE 0x00000002L
+#define ATOM_S3_TV1_ACTIVE 0x00000004L
+#define ATOM_S3_DFP1_ACTIVE 0x00000008L
+#define ATOM_S3_CRT2_ACTIVE 0x00000010L
+#define ATOM_S3_LCD2_ACTIVE 0x00000020L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
+#define ATOM_S3_DFP2_ACTIVE 0x00000080L
+#define ATOM_S3_CV_ACTIVE 0x00000100L
+#define ATOM_S3_DFP3_ACTIVE 0x00000200L
+#define ATOM_S3_DFP4_ACTIVE 0x00000400L
+#define ATOM_S3_DFP5_ACTIVE 0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S3_CRT1_ACTIVEb0 0x01
+#define ATOM_S3_LCD1_ACTIVEb0 0x02
+#define ATOM_S3_TV1_ACTIVEb0 0x04
+#define ATOM_S3_DFP1_ACTIVEb0 0x08
+#define ATOM_S3_CRT2_ACTIVEb0 0x10
+#define ATOM_S3_LCD2_ACTIVEb0 0x20
+#define ATOM_S3_DFP6_ACTIVEb0 0x40
+#define ATOM_S3_DFP2_ACTIVEb0 0x80
+#define ATOM_S3_CV_ACTIVEb1 0x01
+#define ATOM_S3_DFP3_ACTIVEb1 0x02
+#define ATOM_S3_DFP4_ACTIVEb1 0x04
+#define ATOM_S3_DFP5_ACTIVEb1 0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
+
+// BIOS_4_SCRATCH Definition
+#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT 8
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
+#define ATOM_S5_DOS_REQ_CRT1b0 0x01
+#define ATOM_S5_DOS_REQ_LCD1b0 0x02
+#define ATOM_S5_DOS_REQ_TV1b0 0x04
+#define ATOM_S5_DOS_REQ_DFP1b0 0x08
+#define ATOM_S5_DOS_REQ_CRT2b0 0x10
+#define ATOM_S5_DOS_REQ_LCD2b0 0x20
+#define ATOM_S5_DOS_REQ_DFP6b0 0x40
+#define ATOM_S5_DOS_REQ_DFP2b0 0x80
+#define ATOM_S5_DOS_REQ_CVb1 0x01
+#define ATOM_S5_DOS_REQ_DFP3b1 0x02
+#define ATOM_S5_DOS_REQ_DFP4b1 0x04
+#define ATOM_S5_DOS_REQ_DFP5b1 0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
+
+#define ATOM_S5_DOS_REQ_CRT1 0x0001
+#define ATOM_S5_DOS_REQ_LCD1 0x0002
+#define ATOM_S5_DOS_REQ_TV1 0x0004
+#define ATOM_S5_DOS_REQ_DFP1 0x0008
+#define ATOM_S5_DOS_REQ_CRT2 0x0010
+#define ATOM_S5_DOS_REQ_LCD2 0x0020
+#define ATOM_S5_DOS_REQ_DFP6 0x0040
+#define ATOM_S5_DOS_REQ_DFP2 0x0080
+#define ATOM_S5_DOS_REQ_CV 0x0100
+#define ATOM_S5_DOS_REQ_DFP3 0x0200
+#define ATOM_S5_DOS_REQ_DFP4 0x0400
+#define ATOM_S5_DOS_REQ_DFP5 0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+ (ATOM_S5_DOS_FORCE_CVb3<<8))
+
+// BIOS_6_SCRATCH Definition
+#define ATOM_S6_DEVICE_CHANGE 0x00000001L
+#define ATOM_S6_SCALER_CHANGE 0x00000002L
+#define ATOM_S6_LID_CHANGE 0x00000004L
+#define ATOM_S6_DOCKING_CHANGE 0x00000008L
+#define ATOM_S6_ACC_MODE 0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L
+#define ATOM_S6_LID_STATE 0x00000040L
+#define ATOM_S6_DOCK_STATE 0x00000080L
+#define ATOM_S6_CRITICAL_STATE 0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set; previously it was SCL2_V_expansion
+
+#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
+#define ATOM_S6_ACC_REQ_TV1 0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
+#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
+#define ATOM_S6_ACC_REQ_CV 0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4 0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5 0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S6_DEVICE_CHANGEb0 0x01
+#define ATOM_S6_SCALER_CHANGEb0 0x02
+#define ATOM_S6_LID_CHANGEb0 0x04
+#define ATOM_S6_DOCKING_CHANGEb0 0x08
+#define ATOM_S6_ACC_MODEb0 0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20
+#define ATOM_S6_LID_STATEb0 0x40
+#define ATOM_S6_DOCK_STATEb0 0x80
+#define ATOM_S6_CRITICAL_STATEb1 0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
+#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+
+#define ATOM_S6_ACC_REQ_CRT1b2 0x01
+#define ATOM_S6_ACC_REQ_LCD1b2 0x02
+#define ATOM_S6_ACC_REQ_TV1b2 0x04
+#define ATOM_S6_ACC_REQ_DFP1b2 0x08
+#define ATOM_S6_ACC_REQ_CRT2b2 0x10
+#define ATOM_S6_ACC_REQ_LCD2b2 0x20
+#define ATOM_S6_ACC_REQ_DFP6b2 0x40
+#define ATOM_S6_ACC_REQ_DFP2b2 0x80
+#define ATOM_S6_ACC_REQ_CVb3 0x01
+#define ATOM_S6_ACC_REQ_DFP3b3 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3 0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT 0
+#define ATOM_S6_SCALER_CHANGE_SHIFT 1
+#define ATOM_S6_LID_CHANGE_SHIFT 2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT 3
+#define ATOM_S6_ACC_MODE_SHIFT 4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5
+#define ATOM_S6_LID_STATE_SHIFT 6
+#define ATOM_S6_DOCK_STATE_SHIFT 7
+#define ATOM_S6_CRITICAL_STATE_SHIFT 8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11
+#define ATOM_S6_REQ_SCALER_SHIFT 12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
+
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
+#define ATOM_S7_DOS_MODE_TYPEb0 0x03
+#define ATOM_S7_DOS_MODE_VGAb0 0x00
+#define ATOM_S7_DOS_MODE_VESAb0 0x01
+#define ATOM_S7_DOS_MODE_EXTb0 0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
+#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
+
+// BIOS_8_SCRATCH Definition
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
+
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
+#endif
+
+
+#define ATOM_FLAG_SET 0x20
+#define ATOM_FLAG_CLEAR 0
+#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
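+
+// Illustrative sketch (not part of the original header): each SET_/CLEAR_
+// token above packs a scratch-register selector (bits 15:8), the set/clear
+// flag (ATOM_FLAG_SET, bit 5), and a bit position (low 5 bits) into one
+// value. A consumer could unpack it like this; names are hypothetical.
+//
+// UCHAR scratch_reg = token >> 8;   // e.g. ATOM_ACC_CHANGE_INFO_DEF
+// UCHAR bit_pos     = token & 0x1F; // e.g. ATOM_S6_LID_STATE_SHIFT
+// int   do_set      = (token & ATOM_FLAG_SET) != 0;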
+
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
+/****************************************************************************/
+
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
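+
+// Illustrative usage note (not part of the original header): the macro is an
+// offsetof-style trick, so e.g.
+//
+//   int idx = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+//
+// evaluates to the zero-based position of the USHORT 'FirmwareInfo' slot in
+// ATOM_MASTER_LIST_OF_DATA_TABLES ('FirmwareInfo' is assumed here; any field
+// of a master table list works), which a driver then uses to look up that
+// table's offset in the ROM.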
+
+/****************************************************************************/
+//Portion III: Definitions only used in VBIOS
+/****************************************************************************/
+#define ATOM_DAC_SRC 0x80
+#define ATOM_SRC_DAC1 0
+#define ATOM_SRC_DAC2 0x80
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ UCHAR ucAction; //not defined yet
+ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte
+ UCHAR ucFbDiv; //FB value
+ UCHAR ucPostDiv; //Post div
+}MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
+
+
+#define GPIO_PIN_WRITE 0x01
+#define GPIO_PIN_READ 0x00
+
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS
+{
+ UCHAR ucGPIO_ID; //return value, read from GPIO pins
+ UCHAR ucGPIOBitShift; //defines which bit in ucGPIOBitVal needs to be updated
+ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
+ UCHAR ucAction; //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
+ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+ UCHAR ucTVStandard; //
+ UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+//ucEnable:
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
+#define SCALER_ENABLE_MULTITAP_MODE 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+ ULONG usHWIconHorzVertPosn; // Hardware Icon Horizontal/Vertical position
+ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
+ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
+ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+ ENABLE_CRTC_PARAMETERS sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ USHORT usGraphPitch;
+ UCHAR ucColorDepth;
+ UCHAR ucPixelFormat;
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucModeType;
+ UCHAR ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+ USHORT usMemoryStart; //on an 8KB boundary, offset from memory base address
+ USHORT usMemorySize; //aligned to 8KB blocks
+}MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+ USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
+ USHORT usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+ union{
+ USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
+ USHORT usSurface;
+ };
+ USHORT usY_Size;
+ USHORT usDispXStart;
+ USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
+{
+ UCHAR ucLutId;
+ UCHAR ucAction;
+ USHORT usLutStartIndex;
+ USHORT usLutLength;
+ USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL 1
+#define PALETTE_DATA_READ 2
+#define PALETTE_DATA_WRITE 3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+ UCHAR ucInterruptId;
+ UCHAR ucServiceId;
+ UCHAR ucStatus;
+ UCHAR ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID 1
+#define HDP2_INTERRUPT_ID 2
+#define HDP3_INTERRUPT_ID 3
+#define HDP4_INTERRUPT_ID 4
+#define HDP5_INTERRUPT_ID 5
+#define HDP6_INTERRUPT_ID 6
+#define SW_INTERRUPT_ID 11
+
+// ucServiceId
+#define INTERRUPT_SERVICE_GEN_SW_INT 1
+#define INTERRUPT_SERVICE_GET_STATUS 2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER 1
+#define INTERRUPT_STATUS__HPD_HIGH 2
+
+typedef struct _INDIRECT_IO_ACCESS
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ 0x00
+#define INDIRECT_WRITE 0x80
+
+#define INDIRECT_IO_MM 0
+#define INDIRECT_IO_PLL 1
+#define INDIRECT_IO_MC 2
+#define INDIRECT_IO_PCIE 3
+#define INDIRECT_IO_PCIEP 4
+#define INDIRECT_IO_NBMISC 5
+
+#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
+
+typedef struct _ATOM_OEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+ UCHAR ucVMode_Num; //Video mode number
+ UCHAR ucTV_Mode_Num; //Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
+ USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
+ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
+ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_CV table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
+ USHORT usFilter1_Offset; //Pointer to filter format 1 coefficients
+ UCHAR ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{
+ USHORT usSTD_HDisp;
+ USHORT usSTD_VDisp;
+ USHORT usSTD_RefreshRate;
+ USHORT usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+ USHORT usVESA_ModeNumber;
+ USHORT usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTERNAL_MODE_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTERNAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+ UCHAR ucMemoryType;
+ UCHAR ucMemoryVendor;
+ UCHAR ucAdjMCId;
+ UCHAR ucDynClkId;
+ ULONG ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
+#if ATOM_BIG_ENDIAN
+ ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
+#else
+ ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
+#endif
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+ ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+ ULONG ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
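+
+// Illustrative sketch (editor's addition): the union above packs the 8-bit
+// memory block id and the 24-bit clock range (presumably in 10 kHz units,
+// as for ulClkRange elsewhere in this file) into a single ULONG.
+static inline ULONG example_pack_mem_setting_id(UCHAR ucBlkId, ULONG ulClkRange)
+{
+   ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS a;
+
+   a.slAccess.ucMemBlkId      = ucBlkId;
+   a.slAccess.ulMemClockRange = ulClkRange;   // caller keeps this within 24 bits
+   return a.ulAccess;
+}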
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+ ULONG aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+ USHORT usRegIndex; // MC register index
+ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_BLOCK.asRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+ USHORT usRegIndexTblSize; //size of asRegIndexBuf
+ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK 0x0ffff
+#define END_OF_REG_DATA_BLOCK 0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
+#define CLOCK_RANGE_HIGHEST 0x00ffffff
+
+#define VALUE_DWORD sizeof(ULONG)
+#define VALUE_SAME_AS_ABOVE 0
+#define VALUE_MASK_DWORD 0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80
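+
+// Illustrative sketch (editor's addition): walking the register index list of
+// an ATOM_INIT_REG_BLOCK. The list ends with an END_OF_REG_INDEX_BLOCK entry.
+static inline int example_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
+{
+   int n = 0;
+
+   while (blk->asRegIndexBuf[n].usRegIndex != END_OF_REG_INDEX_BLOCK)
+      n++;
+   return n;
+}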
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usAdjustARB_SEQDataOffset;
+ USHORT usMCInitMemTypeTblOffset;
+ USHORT usMCInitCommonTblOffset;
+ USHORT usMCInitPowerDownTblOffset;
+ ULONG ulARB_SEQDataBuf[32];
+ ATOM_INIT_REG_BLOCK asMCInitMemType;
+ ATOM_INIT_REG_BLOCK asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
+
+#define _4Mx16 0x2
+#define _4Mx32 0x3
+#define _8Mx16 0x12
+#define _8Mx32 0x13
+#define _16Mx16 0x22
+#define _16Mx32 0x23
+#define _32Mx16 0x32
+#define _32Mx32 0x33
+#define _64Mx8 0x41
+#define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61
+#define _256Mx16 0x62
+
+#define SAMSUNG 0x1
+#define INFINEON 0x2
+#define ELPIDA 0x3
+#define ETRON 0x4
+#define NANYA 0x5
+#define HYNIX 0x6
+#define MOSEL 0x7
+#define WINBOND 0x8
+#define ESMT 0x9
+#define MICRON 0xF
+
+#define QIMONDA INFINEON
+#define PROMOS MOSEL
+#define KRETON INFINEON
+#define ELIXIR NANYA
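+
+// Illustrative sketch (editor's addition): the _<size>Mx<width> codes above
+// put the device size code in the high nibble and the width code in the low
+// nibble, each a power of two scaled by 4 (e.g. _8Mx32 == 0x13 -> 8M x32).
+static inline void example_decode_density(UCHAR ucDensity, unsigned *pSizeM, unsigned *pWidth)
+{
+   *pSizeM = 4u << (ucDensity >> 4);    // 0x1 -> 8M, 0x2 -> 16M, ...
+   *pWidth = 4u << (ucDensity & 0x0F);  // 0x1 -> x8,  0x2 -> x16, ...
+}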
+
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
+
+#define UCODE_ROM_START_ADDRESS 0x1b800
+#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+ ULONG ulSignature;
+ UCHAR ucRevision;
+ UCHAR ucChecksum;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+ USHORT usParametersLength;
+ USHORT usUCodeLength;
+ USHORT usReserved1;
+ USHORT usReserved2;
+} MCuCodeHeader;
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+ ULONG ulReserved;
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucChannelNum; // Number of channels;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[7:4]=Channel bit width, in power of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+ ULONG ulReserved;
+ ULONG ulFlags; // To enable/disable functionalities based on memory type
+ ULONG ulEngineClock; // Override of default engine clock for particular memory type
+ ULONG ulMemoryClock; // Override of default memory clock for particular memory type
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucChannelNum; // Number of channels;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[7:4]=Channel bit width, in power of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucRefreshRateFactor;
+ UCHAR ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT
+{
+ ULONG ulClkRange; // memory clock in 10 kHz units; when the target memory clock is below this clock, use this memory timing
+ union{
+ USHORT usMRS; // mode register
+ USHORT usDDR3_MR0;
+ };
+ union{
+ USHORT usEMRS; // extended mode register
+ USHORT usDDR3_MR1;
+ };
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ union
+ {
+ struct {
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+ UCHAR ucReserved;
+ };
+ USHORT usDDR3_MR2;
+ };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+ ULONG ulClkRange; // memory clock in 10 kHz units; when the target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+ ULONG ulClkRange; // memory clock in 10 kHz units; when the target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR4lo; //
+ UCHAR ucMR4hi; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+ UCHAR ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef struct _ATOM_MEMORY_FORMAT
+{
+ ULONG ulDllDisClock; // memory DLL will be disabled when target memory clock is below this clock
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved; // Not used for DDR3 memory
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+ UCHAR ucRow; // Number of rows, in power of 2;
+ UCHAR ucColumn; // Number of columns, in power of 2;
+ UCHAR ucBank; // Number of banks;
+ UCHAR ucRank; // Number of ranks, in power of 2
+ UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
+ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemAttrib; // Memory device attributes, like RDBI/WDBI etc
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
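+
+// Illustrative sketch (editor's addition): selecting the timing entry for a
+// target memory clock. Entries are sorted by ulClkRange in ascending order,
+// so the first entry whose ulClkRange is at or above the target applies.
+static inline const ATOM_MEMORY_TIMING_FORMAT *
+example_pick_mem_timing(const ATOM_MEMORY_FORMAT *mem, ULONG ulTargetClk)
+{
+   int i;
+
+   for (i = 0; i < 5; i++) {
+      if (ulTargetClk <= mem->asMemTiming[i].ulClkRange)
+         return &mem->asMemTiming[i];
+   }
+   return &mem->asMemTiming[4];   // target above all ranges: fall back to the highest entry
+}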
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usSize; // size of ATOM_VRAM_MODULE_V3
+ USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage
+ USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucChannelNum; // board dependent parameter:Number of channels;
+ UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit
+ UCHAR ucVREFI; // board dependent parameter: EXT or INT +160mv to -140mv
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ ATOM_MEMORY_FORMAT asMemory; // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK 0x0f
+#define BATTERY_ODT_MASK 0xc0
+
+#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+// Compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved;
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucReserved2[2];
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK 0x4
+#define VRAM_MODULE_V4_MISC_BL8 0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
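+
+// Illustrative sketch (editor's addition): decoding ucMisc of
+// ATOM_VRAM_MODULE_V4 with the masks above.
+static inline int example_is_dual_rank(UCHAR ucMisc)
+{
+   return (ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) == VRAM_MODULE_V4_MISC_DUAL_RANK;
+}
+
+static inline int example_burst_length(UCHAR ucMisc)
+{
+   return (ucMisc & VRAM_MODULE_V4_MISC_BL_MASK) ? 8 : 4;   // bit2: 0 -> BL4, 1 -> BL8
+}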
+
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V5, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+// Compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V5 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if vendor detection is not done it has to be hardcoded here
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V6, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used to calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+// Compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V6 at the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined; if not predefined, the vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if vendor detection is not done it has to be hardcoded here
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usEnableChannels; // bit vector which indicate which channels are enabled
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ USHORT usSEQSettingOffset;
+ UCHAR ucReserved;
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, but here it is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number string, terminated with '\0'.
+}ATOM_VRAM_MODULE_V7;
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V3;
+
+#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line (7~0) uses 3 bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V4;
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+ USHORT usReserved[3];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
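+
+// Illustrative sketch (editor's addition): VRAM module entries are variable
+// sized, so usModuleSize (not sizeof) steps to the next entry;
+// ucNumOfVRAMModule in the header gives the real count.
+static inline const ATOM_VRAM_MODULE_V7 *
+example_next_vram_module_v7(const ATOM_VRAM_MODULE_V7 *mod)
+{
+   return (const ATOM_VRAM_MODULE_V7 *)((const UCHAR *)mod + mod->usModuleSize);
+}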
+
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTrainingLoop;
+ UCHAR ucReserved[3];
+ ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+ UCHAR ucControl;
+ UCHAR ucData;
+ UCHAR ucSatus;
+ UCHAR ucTemp;
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{
+ USHORT GPIO_Info;
+ UCHAR ucAct;
+ UCHAR ucData;
+} SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET 0
+#define SW_I2C_IO_GET 1
+#define SW_I2C_IO_DRIVE 2
+#define SW_I2C_IO_SET 3
+#define SW_I2C_IO_START 4
+
+#define SW_I2C_IO_CLOCK 0
+#define SW_I2C_IO_DATA 0x80
+
+#define SW_I2C_IO_ZERO 0
+#define SW_I2C_IO_ONE 0x100
+
+#define SW_I2C_CNTL_READ 0
+#define SW_I2C_CNTL_WRITE 1
+#define SW_I2C_CNTL_START 2
+#define SW_I2C_CNTL_STOP 3
+#define SW_I2C_CNTL_OPEN 4
+#define SW_I2C_CNTL_CLOSE 5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+//==============================VESA definition Portion===============================
+#define VESA_OEM_PRODUCT_REV "01.00"
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
+#define VESA_MODE_WIN_ATTRIBUTE 7
+#define VESA_WIN_SIZE 64
+
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+ USHORT Offset16;
+ USHORT Segment16;
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION
+{
+ PTR_32_BIT_STRUCTURE SegmentOffset;
+ ULONG Ptr32_Bit;
+} PTR_32_BIT_UNION;
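+
+// Illustrative sketch (editor's addition): converting the real-mode
+// segment:offset pointer above to a linear address (segment * 16 + offset).
+static inline ULONG example_far_ptr_to_linear(PTR_32_BIT_UNION p)
+{
+   return ((ULONG)p.SegmentOffset.Segment16 << 4) + p.SegmentOffset.Offset16;
+}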
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+ UCHAR VbeSignature[4];
+ USHORT VbeVersion;
+ PTR_32_BIT_UNION OemStringPtr;
+ UCHAR Capabilities[4];
+ PTR_32_BIT_UNION VideoModePtr;
+ USHORT TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+ USHORT OemSoftRev;
+ PTR_32_BIT_UNION OemVendorNamePtr;
+ PTR_32_BIT_UNION OemProductNamePtr;
+ PTR_32_BIT_UNION OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION
+{
+ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK
+{
+ VBE_VERSION_UNION UpdatableVBE_Info;
+ UCHAR Reserved[222];
+ UCHAR OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO
+{
+ USHORT HSize;
+ USHORT VSize;
+ USHORT FPType;
+ UCHAR RedBPP;
+ UCHAR GreenBPP;
+ UCHAR BlueBPP;
+ UCHAR ReservedBPP;
+ ULONG RsvdOffScrnMemSize;
+ ULONG RsvdOffScrnMEmPtr;
+ UCHAR Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+ USHORT ModeAttributes; // dw ? ; mode attributes
+ UCHAR WinAAttributes; // db ? ; window A attributes
+ UCHAR WinBAttributes; // db ? ; window B attributes
+ USHORT WinGranularity; // dw ? ; window granularity
+ USHORT WinSize; // dw ? ; window size
+ USHORT WinASegment; // dw ? ; window A start segment
+ USHORT WinBSegment; // dw ? ; window B start segment
+ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function
+ USHORT BytesPerScanLine;// dw ? ; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters
+ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters
+ UCHAR XCharSize; // db ? ; character cell width in pixels
+ UCHAR YCharSize; // db ? ; character cell height in pixels
+ UCHAR NumberOfPlanes; // db ? ; number of memory planes
+ UCHAR BitsPerPixel; // db ? ; bits per pixel
+ UCHAR NumberOfBanks; // db ? ; number of banks
+ UCHAR MemoryModel; // db ? ; memory model type
+ UCHAR BankSize; // db ? ; bank size in KB
+ UCHAR NumberOfImagePages;// db ? ; number of images
+ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits
+ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask
+ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits
+ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask
+ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits
+ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask
+ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits
+ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask
+ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+ ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer
+ ULONG Reserved_1; // dd 0 ; reserved - always set to 0
+ USHORT Reserved_2; // dw 0 ; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes
+ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes
+ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes
+ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes)
+ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes)
+ UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes)
+ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes)
+ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes)
+ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes)
+ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes)
+ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes)
+ ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode
+ UCHAR Reserved[190]; // db 190 dup (0)
+} VESA_MODE_INFO_BLOCK;
+
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code
+#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD 0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
+#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 81
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported
+
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS
+#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01
+#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON.
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
+
+#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
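+
+// Illustrative sketch (editor's addition): extracting the BIOS return code
+// (the AH byte) from a register value using the masks above.
+static inline ULONG example_bios_return_code(ULONG ulRegs)
+{
+   return (ulRegs & ATOM_BIOS_RETURN_CODE_MASK) >> 8;
+}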
+
+// structure used for VBIOS only
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
+ USHORT usTransmitterObjId;
+ USHORT usSupportDevice;
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; //available 1st encoder ( default )
+ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
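+
+// Illustrative sketch (editor's addition): the encoder object id is spread
+// over bits 2, 6 and 7 of ucConfig, so it must be compared under the mask.
+static inline int example_uses_encoder_b(UCHAR ucConfig)
+{
+   return (ucConfig & ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK) ==
+          ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B;
+}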
+
+typedef struct _ASIC_ENCODER_INFO
+{
+ UCHAR ucEncoderID;
+ UCHAR ucEncoderConfig;
+ USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
+
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+ UCHAR ucPpllId;
+ UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE 0x01
+#define CLOCK_SOURCE_DP_MODE 0x02
+#define CLOCK_SOURCE_NONE_DP_MODE 0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+ USHORT usTransmitterObjId;
+ USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; // available 1st encoder ( default )
+ UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
+ USHORT usReserved;
+ UCHAR ucDCERevision;
+ UCHAR ucMaxDispEngineNum;
+ UCHAR ucMaxActiveDispEngineNum;
+ UCHAR ucMaxPPLLNum;
+ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
+ UCHAR ucReserved[3];
+ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+typedef enum CORE_REF_CLK_SOURCE{
+ CLOCK_SRC_XTALIN=0,
+ CLOCK_SRC_XO_IN=1,
+ CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+ USHORT ucLinkClock;
+ union
+ {
+ UCHAR ucConfig; // for DP training command
+ UCHAR ucI2cId; // used for GET_SINK_TYPE command
+ };
+ UCHAR ucAction;
+ UCHAR ucStatus;
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
+/* obsolete */
+#define ATOM_DP_ACTION_TRAINING_START 0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP 0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
+#define ATOM_DP_ACTION_BLANKING 0x07
+
+// ucConfig
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER 0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
+#define ATOM_DP_CONFIG_LINK_A 0x00
+#define ATOM_DP_CONFIG_LINK_B 0x04
+/* /obsolete */
+#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and output parameters.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+ DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
+
+
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
+#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8)
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+ UCHAR ucI2CSpeed;
+ union
+ {
+ UCHAR ucRegIndex;
+ UCHAR ucStatus;
+ };
+ USHORT lpI2CDataOut;
+ UCHAR ucFlag;
+ UCHAR ucTransBytes;
+ UCHAR ucSlaveAddr;
+ UCHAR ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+//ucFlag
+#define HW_I2C_WRITE 1
+#define HW_I2C_READ 0
+#define I2C_2BYTE_ADDR 0x02
+
+/****************************************************************************/
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/
+typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
+{
+ UCHAR ucCmd; // Input: To tell which action to take
+ UCHAR ucReserved[3];
+ ULONG ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
+
+typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
+{
+ UCHAR ucReturnCode; // Output: Return value based on the action taken
+ UCHAR ucReserved[3];
+ ULONG ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define ATOM_GET_SDI_SUPPORT 0xF0
+
+// Return code
+#define ATOM_UNKNOWN_CMD 0
+#define ATOM_FEATURE_NOT_SUPPORTED 1
+#define ATOM_FEATURE_SUPPORTED 2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+ ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
+ PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
+
+/****************************************************************************/
+
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+ UCHAR ucReserved[3];
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK 0x07
+#define HWBLKINST_HWBLK_MASK 0xF0
+#define HWBLKINST_HWBLK_SHIFT 0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE 0
+#define SELECT_DISP_PLL 1
+#define SELECT_DCIO_UNIPHY_LINK0 2
+#define SELECT_DCIO_UNIPHY_LINK1 3
+#define SELECT_DCIO_IMPCAL 4
+#define SELECT_DCIO_DIG 6
+#define SELECT_CRTC_PIXEL_RATE 7
+#define SELECT_VGA_BLK 8
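+
+// Illustrative sketch (editor's addition): composing ucHWBlkInst from one of
+// the block selectors above and an instance number, using the masks above,
+// e.g. example_hwblk_inst(SELECT_CRTC_PIXEL_RATE, 1).
+static inline UCHAR example_hwblk_inst(UCHAR ucBlock, UCHAR ucInstance)
+{
+   return (UCHAR)(((ucBlock << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
+                  (ucInstance & HWBLKINST_INSTANCE_MASK));
+}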
+
+// DIGTransmitterInfoTable structure used to program UNIPHY settings
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO* with DP voltage swing and pre-emphasis for each link clock
+ USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with non-DP mode analog setting register info
+ USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with non-DP mode analog settings for each link clock range
+ USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with PHY PLL register info
+ USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with PHY PLL settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+ USHORT usRegisterIndex;
+ UCHAR ucStartBit;
+ UCHAR ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+ USHORT usMaxClockFreq;
+ UCHAR ucEncodeMode;
+ UCHAR ucPhySel;
+ ULONG ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+ USHORT usEntrySize;
+ CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+ ULONG ulCondition;
+ ULONG ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+ USHORT usRegIndex;
+ USHORT usSize;
+ PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+ UCHAR ucEncodeMode;
+ UCHAR ucPhySel;
+ USHORT usSize;
+ PHY_CONDITION_REG_INFO asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
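+
+// Illustrative sketch (editor's addition): reading the memory type field out
+// of the MC_MISC0 scratch value with the mask above.
+static inline int example_is_gddr5(ULONG ulMC_Misc0)
+{
+   return (ulMC_Misc0 & MC_MISC0__MEMORY_TYPE_MASK) == MC_MISC0__MEMORY_TYPE__GDDR5;
+}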
+
+/****************************************************************************/
+//Portion VI: Definitions that are obsolete
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10kHz unit
+ USHORT usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct _COMPASSIONATE_DATA
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ //============================== DAC1 portion
+ UCHAR ucDAC1_BG_Adjustment;
+ UCHAR ucDAC1_DAC_Adjustment;
+ USHORT usDAC1_FORCE_Data;
+ //============================== DAC2 portion
+ UCHAR ucDAC2_CRT2_BG_Adjustment;
+ UCHAR ucDAC2_CRT2_DAC_Adjustment;
+ USHORT usDAC2_CRT2_FORCE_Data;
+ USHORT usDAC2_CRT2_MUX_RegisterIndex;
+ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_NTSC_BG_Adjustment;
+ UCHAR ucDAC2_NTSC_DAC_Adjustment;
+ USHORT usDAC2_TV1_FORCE_Data;
+ USHORT usDAC2_TV1_MUX_RegisterIndex;
+ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_CV_BG_Adjustment;
+ UCHAR ucDAC2_CV_DAC_Adjustment;
+ USHORT usDAC2_CV_FORCE_Data;
+ USHORT usDAC2_CV_MUX_RegisterIndex;
+ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_PAL_BG_Adjustment;
+ UCHAR ucDAC2_PAL_DAC_Adjustment;
+ USHORT usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+// ucConnectInfo:
+// [7:4] - connector type
+// = 1 - VGA connector
+// = 2 - DVI-I
+// = 3 - DVI-D
+// = 4 - DVI-A
+// = 5 - SVIDEO
+// = 6 - COMPOSITE
+// = 7 - LVDS
+// = 8 - DIGITAL LINK
+// = 9 - SCART
+// = 0xA - HDMI_type A
+// = 0xB - HDMI_type B
+// = 0xE - Special case1 (DVI+DIN)
+// Others=TBD
+// [3:0] - DAC Associated
+// = 0 - no DAC
+// = 1 - DACA
+// = 2 - DACB
+// = 3 - External DAC
+// Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
+#else
+ UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+ ATOM_CONNECTOR_INFO sbfAccess;
+ UCHAR ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED 0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+ UCHAR ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+ USHORT usFrequency;
+ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control
+ UCHAR ucPLL_DutyCycle; // PLL duty cycle control
+ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control
+ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;
+
+
+#define ATOM_MAX_MISC_INFO 4
+
+typedef struct _ATOM_TMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10 kHz units
+ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+ UCHAR ucTVStandard; //Same as TV standards defined above,
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+ UCHAR ucAttribute; //Same as other digital encoder attributes defined above
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock;
+ USHORT usEncoderID;
+ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ ATOM_ENCODER_ATTRIBUTE usDevAttr;
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+
+#define ATOM_XTMDS_ASIC_SI164_ID 1
+#define ATOM_XTMDS_ASIC_SI178_ID 2
+#define ATOM_XTMDS_ASIC_TFP513_ID 3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
+#define ATOM_XTMDS_MVPU_FPGA 0x00000004
+
+
+typedef struct _ATOM_XTMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usSingleLinkMaxFrequency;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Points to the ID of the I2C bus used to control the external chip
+ UCHAR ucXtransimitterID;
+ UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+ UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the programming sequence alters
+ // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip
+ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off
+ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX....
+ UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit is set, ucVoltageDropIndex is not an index to a GPIO pin but a voltage ID that SW needs to program
+
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, dynamic HDP block gating is enabled
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, dynamic MC host block gating is enabled
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, this mode is for accelerated 3D
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, the driver will pick the one with the minimum power consumption.
+ //If it is not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulReserved1; // must set to 0
+ ULONG ulReserved2; // must set to 0
+ USHORT usEngineClock;
+ USHORT usMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to Core (VDDC) voltage table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
+
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
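+
+// Illustrative sketch (editor's addition): decoding ucFanParameters with the
+// masks above.
+static inline int example_tach_pulses_per_rev(UCHAR ucFanParameters)
+{
+   return ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+}
+
+static inline int example_has_fan(UCHAR ucFanParameters)
+{
+   return !(ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN);
+}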
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
+#define ATOM_PP_THERMALCONTROLLER_LM96163 17
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
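+// i.e. 0x80 | ATOM_PP_THERMALCONTROLLER_ADT7473 (9 -> 0x89) and
+// 0x80 | ATOM_PP_THERMALCONTROLLER_EMC2103 (13 -> 0x8D), following the
+// bit-0x80 convention described above.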
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format or version changes so that the other fields are not interpreted the same way.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in units of 0.01 degrees centigrade, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
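+// A sketch (not part of this header) of the two-slope fan curve the fields
+// above imply, with temperatures in 0.01 degC and PWM in 0.01%:
+//   T <= usTMin           : pwm = usPWMMin
+//   usTMin < T <= usTMed  : pwm = usPWMMin + (usPWMMed - usPWMMin) * (T - usTMin) / (usTMed - usTMin)
+//   usTMed < T <= usTHigh : pwm = usPWMMed + (usPWMHigh - usPWMMed) * (T - usTMed) / (usTHigh - usTMed)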
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+ ATOM_PPLIB_FANTABLE basicTable;
+ USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+ USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
+
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PP_PLATFORM_CAP_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
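+// A hedged sketch (not from this header) of locating state N via the offsets
+// above, assuming 'base' points at the table and fields are little-endian:
+//   ATOM_PPLIB_STATE *st = (ATOM_PPLIB_STATE *)
+//       ((UCHAR *)base + le16_to_cpu(base->usStateArrayOffset) +
+//        n * base->ucStateEntrySize);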
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+ USHORT usReserved;
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
+ USHORT usTDPODLimit;
+ USHORT usLoadLineSlope; // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// encoded as (lanes - 1); 1, 2, 4, 8, 12, 16 lanes are permitted by the PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
+
+// M3 Arb; 2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
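+// Clocks are split into a 16-bit low word and an 8-bit high byte. For
+// illustration, reassembling the engine clock (in 10 kHz units, cf. the
+// SUMO struct below):
+//   sclk = le16_to_cpu(info->usEngineClockLow) | (info->ucEngineClockHigh << 16);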
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ UCHAR ucPCIEGen;
+ UCHAR ucUnused1;
+
+ ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. The minimum downstream width could be bigger due to display BW requirements.
+ USHORT usHTLinkFreq; // See the ATOM_PPLIB_RS780_HTLINKFREQ_xxx definitions, or a value in MHz (>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10 kHz.
+ UCHAR ucEngineClockHigh; //clock frequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ USHORT tdpLimit;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
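+// Per the sizing rule in the comment above, stepping to the next packed
+// entry in a StateArray might look like:
+//   s = (ATOM_PPLIB_STATE_V2 *)((UCHAR *)s + sizeof(ATOM_PPLIB_STATE_V2) +
+//                               (s->ucNumDPMLevels - 1) * sizeof(UCHAR));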
+
+typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[1];
+}ClockInfoArray;
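+// clockInfo holds ucNumEntries packed records of ucEntrySize bytes each;
+// for illustration, record i starts at &arr->clockInfo[i * arr->ucEntrySize].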
+
+typedef struct _NonClockInfoArray{
+
+ //how many non-clock levels we have. Normally this should be the same as the number of states.
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
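+// Each record pairs a 24-bit clock (usClockLow | ucClockHigh << 16) with the
+// voltage to be used at that clock.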
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Record
+{
+ USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations
+ ULONG ulLeakageValue;
+}ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+ USHORT usVoltage;
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+ USHORT usEVClkLow;
+ UCHAR ucEVClkHigh;
+ USHORT usECClkLow;
+ UCHAR ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+ UCHAR ucVCEClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate the memory p-state, lower 6 bits index into the ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+ UCHAR revid;
+// VCEClockInfoArray array;
+// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+ USHORT usVClkLow;
+ UCHAR ucVClkHigh;
+ USHORT usDClkLow;
+ UCHAR ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_State_Record
+{
+ UCHAR ucUVDClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate the memory p-state, lower 6 bits index into the ClockInfoArray
+}ATOM_PPLIB_UVD_State_Record;
+
+typedef struct _ATOM_PPLIB_UVD_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_State_Record entries[1];
+}ATOM_PPLIB_UVD_State_Table;
+
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+ UCHAR revid;
+// UVDClockInfoArray array;
+// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_UVD_State_Table states;
+}ATOM_PPLIB_UVD_Table;
+
+/**************************************************************************/
+
+
+// The following definitions are for compatibility with different SW components.
+#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
+#define Object_Info Object_Header
+#define AdjustARB_SEQ MC_InitParameter
+#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
+#define ASIC_VDDCI_Info ASIC_ProfilingInfo
+#define ASIC_MVDDQ_Info MemoryTrainingInfo
+#define SS_Info PPLL_SS_Info
+#define ASIC_MVDDC_Info ASIC_InternalSS_Info
+#define DispDevicePriorityInfo SaveRestoreInfo
+#define DispOutInfo TV_VideoMode
+
+
+#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
+
+//New device naming; remove these when both DAL and VBIOS are ready
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
+
+#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I ATOM_S0_DFP1
+#define ATOM_S0_DFP1X ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I 0x00200000L
+#define ATOM_S0_DFP2Ib2 0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1 0x02
+
+#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I 0x0200
+#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
+#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
+
+#define TMDS1XEncoderControl DVOEncoderControl
+#define DFP1XOutputControl DVOOutputControl
+
+#define ExternalDFPOutputControl DFP1XOutputControl
+#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
+
+#define DFP1IOutputControl TMDSAOutputControl
+#define DFP2IOutputControl LVTMAOutputControl
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard ucDacStandard
+#define ucDac2Standard ucDacStandard
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl TMDSAOutputControl
+#define DFP2OutputControl LVTMAOutputControl
+#define CRT1OutputControl DAC1OutputControl
+#define CRT2OutputControl DAC2OutputControl
+
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
+#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
+#define ATOM_S2_CV_DPMS_STATE 0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
+#define ATOM_S2_TV1_DPMS_STATEb2 0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
+#define ATOM_S2_TV2_DPMS_STATEb2 0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
+#define ATOM_S2_CV_DPMS_STATEb3 0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
+
+/*********************************************************************************/
+
+#pragma pack() // BIOS data must use byte alignment
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+ ULONG Signature;
+ ULONG TableLength; //Length
+ UCHAR Revision;
+ UCHAR Checksum;
+ UCHAR OemId[6];
+ UCHAR OemTableId[8]; //UINT64 OemTableId;
+ ULONG OemRevision;
+ ULONG CreatorId;
+ ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+ UINT32 Signature; //0x0
+ UINT32 Length; //0x4
+ UINT8 Revision; //0x8
+ UINT8 Checksum; //0x9
+ UINT8 OemId[6]; //0xA
+ UINT64 OemTableId; //0x10
+ UINT32 OemRevision; //0x18
+ UINT32 CreatorId; //0x1C
+ UINT32 CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+ AMD_ACPI_DESCRIPTION_HEADER SHeader;
+ UCHAR TableUUID[16]; //0x24
+ ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+ ULONG Reserved[4]; //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+ ULONG PCIBus; //0x4C
+ ULONG PCIDevice; //0x50
+ ULONG PCIFunction; //0x54
+ USHORT VendorID; //0x58
+ USHORT DeviceID; //0x5A
+ USHORT SSVID; //0x5C
+ USHORT SSID; //0x5E
+ ULONG Revision; //0x60
+ ULONG ImageLength; //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+ VFCT_IMAGE_HEADER VbiosHeader;
+ UCHAR VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+ VFCT_IMAGE_HEADER Lib1Header;
+ UCHAR Lib1Content[1];
+}GOP_LIB1_CONTENT;
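+// A hedged sketch of how a driver might fetch the VBIOS from the VFCT: follow
+// UEFI_ACPI_VFCT::VBIOSImageOffset to a GOP_VBIOS_CONTENT block, match the
+// VFCT_IMAGE_HEADER PCI bus/device/function and VendorID/DeviceID against the
+// probed GPU, then copy ImageLength bytes of VbiosContent.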
+
+#pragma pack()
+
+
+#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 0000000..d5df8fd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,1929 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
+static void atombios_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+ int a1, a2;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ switch (radeon_crtc->rmx_type) {
+ case RMX_CENTER:
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+ break;
+ case RMX_ASPECT:
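+ /* compare the source and adjusted aspect ratios by cross-multiplying
+ * (avoids division): a1 > a2 means the adjusted mode is proportionally
+ * wider, so pad left/right; a2 > a1 means it is taller, so pad
+ * top/bottom.
+ */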
+ a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+ a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+ if (a1 > a2) {
+ args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+ args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+ } else if (a2 > a1) {
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ }
+ break;
+ case RMX_FULL:
+ default:
+ args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+ args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+ args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+ args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
+ break;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_scaler_setup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ ENABLE_SCALER_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ /* fixme - fill in enc_priv for atom dac */
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+ bool is_tv = false, is_cv = false;
+
+ if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+ return;
+
+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ tv_std = tv_dac->tv_std;
+ is_tv = true;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucScaler = radeon_crtc->crtc_id;
+
+ if (is_tv) {
+ switch (tv_std) {
+ case TV_STD_NTSC:
+ default:
+ args.ucTVStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.ucTVStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.ucTVStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.ucTVStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.ucTVStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.ucTVStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.ucTVStandard = ATOM_TV_PALCN;
+ break;
+ }
+ args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+ } else if (is_cv) {
+ args.ucTVStandard = ATOM_TV_CV;
+ args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+ } else {
+ switch (radeon_crtc->rmx_type) {
+ case RMX_FULL:
+ args.ucEnable = ATOM_SCALER_EXPANSION;
+ break;
+ case RMX_CENTER:
+ args.ucEnable = ATOM_SCALER_CENTER;
+ break;
+ case RMX_ASPECT:
+ args.ucEnable = ATOM_SCALER_EXPANSION;
+ break;
+ default:
+ if (ASIC_IS_AVIVO(rdev))
+ args.ucEnable = ATOM_SCALER_DISABLE;
+ else
+ args.ucEnable = ATOM_SCALER_CENTER;
+ break;
+ }
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if ((is_tv || is_cv)
+ && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
+ atom_rv515_force_tv_scaler(rdev, radeon_crtc);
+ }
+}
+
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index =
+ GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = lock;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+ ENABLE_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucEnable = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+ BLANK_CRTC_PS_ALLOCATION args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucCRTC = radeon_crtc->crtc_id;
+ args.ucBlanking = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+ ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+ memset(&args, 0, sizeof(args));
+
+ args.ucDispPipeId = radeon_crtc->crtc_id;
+ args.ucEnable = state;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ atombios_enable_crtc(crtc, ATOM_ENABLE);
+ if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ atombios_blank_crtc(crtc, ATOM_DISABLE);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (radeon_crtc->enabled)
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
+ if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ break;
+ }
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+ u16 misc = 0;
+
+ memset(&args, 0, sizeof(args));
+ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
+ args.usH_Blanking_Time =
+ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
+ args.usV_Blanking_Time =
+ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
+ args.usH_SyncOffset =
+ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_SyncOffset =
+ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ args.ucH_Border = radeon_crtc->h_border;
+ args.ucV_Border = radeon_crtc->v_border;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+ u16 misc = 0;
+
+ memset(&args, 0, sizeof(args));
+ args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+ args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+ args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+ args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+ args.ucOverscanRight = radeon_crtc->h_border;
+ args.ucOverscanLeft = radeon_crtc->h_border;
+ args.ucOverscanBottom = radeon_crtc->v_border;
+ args.ucOverscanTop = radeon_crtc->v_border;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+{
+ u32 ss_cntl;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ }
+}
+
+
+union atom_enable_ss {
+ ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+ ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+};
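+/* one union covers every parameter revision of EnableSpreadSpectrumOnPPLL, so
+ * a single buffer can be handed to atom_execute_table() regardless of the
+ * table version the VBIOS reports
+ */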
+
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
+ int enable,
+ int pll_id,
+ int crtc_id,
+ struct radeon_atom_ss *ss)
+{
+ unsigned i;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+ union atom_enable_ss args;
+
+ if (!enable) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+ rdev->mode_info.crtcs[i]->enabled &&
+ i != crtc_id &&
+ pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+ /* another crtc is using this pll; don't turn
+ * off spread spectrum as it might turn off
+ * the display on the active crtc
+ */
+ return;
+ }
+ }
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (ASIC_IS_DCE5(rdev)) {
+ args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+ args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ break;
+ case ATOM_PPLL2:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ break;
+ case ATOM_DCPLL:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v3.ucEnable = enable;
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
+ args.v3.ucEnable = ATOM_DISABLE;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ break;
+ case ATOM_PPLL2:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ break;
+ case ATOM_DCPLL:
+ args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v2.ucEnable = enable;
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
+ args.v2.ucEnable = ATOM_DISABLE;
+ } else if (ASIC_IS_DCE3(rdev)) {
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+ args.v1.ucSpreadSpectrumStep = ss->step;
+ args.v1.ucSpreadSpectrumDelay = ss->delay;
+ args.v1.ucSpreadSpectrumRange = ss->range;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucEnable = enable;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+ (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+ atombios_disable_ss(rdev, pll_id);
+ return;
+ }
+ args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+ args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+ args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+ args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+ args.lvds_ss_2.ucEnable = enable;
+ } else {
+ if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+ (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+ atombios_disable_ss(rdev, pll_id);
+ return;
+ }
+ args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+ args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+ args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+ args.lvds_ss.ucEnable = enable;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union adjust_pixel_clock {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = radeon_crtc->encoder;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ u32 adjusted_clock = mode->clock;
+ int encoder_mode = atombios_get_encoder_mode(encoder);
+ u32 dp_clock = mode->clock;
+ int bpc = radeon_get_monitor_bpc(connector);
+ bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
+
+ /* reset the pll flags */
+ radeon_crtc->pll_flags = 0;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ if ((rdev->family == CHIP_RS600) ||
+ (rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740))
+ radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+ RADEON_PLL_PREFER_CLOSEST_LOWER);
+
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ if (rdev->family < CHIP_RV770)
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ /* use frac fb div on APUs */
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+ radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
+
+ if (mode->clock > 200000) /* range limits??? */
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ }
+
+ if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+ }
+
+ /* use recommended ref_div for ss */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (radeon_crtc->ss_enabled) {
+ if (radeon_crtc->ss.refdiv) {
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ }
+ }
+ }
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+ adjusted_clock = mode->clock * 2;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+ } else {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ }
+
+ /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+ * accordingly based on the encoder/transmitter to work around
+ * special hw requirements.
+ */
+ if (ASIC_IS_DCE3(rdev)) {
+ union adjust_pixel_clock args;
+ u8 frev, crev;
+ int index;
+
+ index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return adjusted_clock;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v1.ucEncodeMode = encoder_mode;
+ if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+ break;
+ case 3:
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v3.sInput.ucEncodeMode = encoder_mode;
+ args.v3.sInput.ucDispPllConfig = 0;
+ if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
+ if (ENCODER_MODE_IS_DP(encoder_mode)) {
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (is_duallink)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)
+ args.v3.sInput.ucExtTransmitterID =
+ radeon_encoder_get_dp_bridge_encoder_id(encoder);
+ else
+ args.v3.sInput.ucExtTransmitterID = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+ if (args.v3.sOutput.ucRefDiv) {
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
+ }
+ if (args.v3.sOutput.ucPostDiv) {
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+ radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
+ }
+ return adjusted_clock;
+}
+
+union set_pixel_clock {
+ SET_PIXEL_CLOCK_PS_ALLOCATION base;
+ PIXEL_CLOCK_PARAMETERS v1;
+ PIXEL_CLOCK_PARAMETERS_V2 v2;
+ PIXEL_CLOCK_PARAMETERS_V3 v3;
+ PIXEL_CLOCK_PARAMETERS_V5 v5;
+ PIXEL_CLOCK_PARAMETERS_V6 v6;
+};
+
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
+ u32 dispclk)
+{
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 5:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v5.ucCRTC = ATOM_CRTC_INVALID;
+ args.v5.usPixelClock = cpu_to_le16(dispclk);
+ args.v5.ucPpll = ATOM_DCPLL;
+ break;
+ case 6:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+ if (ASIC_IS_DCE61(rdev))
+ args.v6.ucPpll = ATOM_EXT_PLL1;
+ else if (ASIC_IS_DCE6(rdev))
+ args.v6.ucPpll = ATOM_PPLL0;
+ else
+ args.v6.ucPpll = ATOM_DCPLL;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ u32 crtc_id,
+ int pll_id,
+ u32 encoder_mode,
+ u32 encoder_id,
+ u32 clock,
+ u32 ref_div,
+ u32 fb_div,
+ u32 frac_fb_div,
+ u32 post_div,
+ int bpc,
+ bool ss_enabled,
+ struct radeon_atom_ss *ss)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 frev, crev;
+ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ if (clock == ATOM_DISABLE)
+ return;
+ args.v1.usPixelClock = cpu_to_le16(clock / 10);
+ args.v1.usRefDiv = cpu_to_le16(ref_div);
+ args.v1.usFbDiv = cpu_to_le16(fb_div);
+ args.v1.ucFracFbDiv = frac_fb_div;
+ args.v1.ucPostDiv = post_div;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucCRTC = crtc_id;
+ args.v1.ucRefDivSrc = 1;
+ break;
+ case 2:
+ args.v2.usPixelClock = cpu_to_le16(clock / 10);
+ args.v2.usRefDiv = cpu_to_le16(ref_div);
+ args.v2.usFbDiv = cpu_to_le16(fb_div);
+ args.v2.ucFracFbDiv = frac_fb_div;
+ args.v2.ucPostDiv = post_div;
+ args.v2.ucPpll = pll_id;
+ args.v2.ucCRTC = crtc_id;
+ args.v2.ucRefDivSrc = 1;
+ break;
+ case 3:
+ args.v3.usPixelClock = cpu_to_le16(clock / 10);
+ args.v3.usRefDiv = cpu_to_le16(ref_div);
+ args.v3.usFbDiv = cpu_to_le16(fb_div);
+ args.v3.ucFracFbDiv = frac_fb_div;
+ args.v3.ucPostDiv = post_div;
+ args.v3.ucPpll = pll_id;
+ if (crtc_id == ATOM_CRTC2)
+ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+ else
+ args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
+ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+ args.v3.ucTransmitterId = encoder_id;
+ args.v3.ucEncoderMode = encoder_mode;
+ break;
+ case 5:
+ args.v5.ucCRTC = crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(clock / 10);
+ args.v5.ucRefDiv = ref_div;
+ args.v5.usFbDiv = cpu_to_le16(fb_div);
+ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v5.ucPostDiv = post_div;
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+ switch (bpc) {
+ case 8:
+ default:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ }
+ args.v5.ucTransmitterID = encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+ args.v5.ucPpll = pll_id;
+ break;
+ case 6:
+ args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
+ args.v6.ucRefDiv = ref_div;
+ args.v6.usFbDiv = cpu_to_le16(fb_div);
+ args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v6.ucPostDiv = post_div;
+ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+ switch (bpc) {
+ case 8:
+ default:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+ break;
+ case 12:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+ break;
+ case 16:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ }
+ args.v6.ucTransmitterID = encoder_id;
+ args.v6.ucEncoderMode = encoder_mode;
+ args.v6.ucPpll = pll_id;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+ radeon_crtc->bpc = 8;
+ radeon_crtc->ss_enabled = false;
+
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector =
+ radeon_get_connector_for_encoder(radeon_crtc->encoder);
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+ int dp_clock;
+ radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
+
+ switch (encoder_mode) {
+ case ATOM_ENCODER_MODE_DP_MST:
+ case ATOM_ENCODER_MODE_DP:
+ /* DP/eDP */
+ dp_clock = dig_connector->dp_clock / 10;
+ if (ASIC_IS_DCE4(rdev))
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+ ASIC_INTERNAL_SS_ON_DP,
+ dp_clock);
+ else {
+ if (dp_clock == 16200) {
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ ATOM_DP_SS_ID2);
+ if (!radeon_crtc->ss_enabled)
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ ATOM_DP_SS_ID1);
+ } else
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ ATOM_DP_SS_ID1);
+ }
+ break;
+ case ATOM_ENCODER_MODE_LVDS:
+ if (ASIC_IS_DCE4(rdev))
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
+ dig->lcd_ss_id,
+ mode->clock / 10);
+ else
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev,
+ &radeon_crtc->ss,
+ dig->lcd_ss_id);
+ break;
+ case ATOM_ENCODER_MODE_DVI:
+ if (ASIC_IS_DCE4(rdev))
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
+ ASIC_INTERNAL_SS_ON_TMDS,
+ mode->clock / 10);
+ break;
+ case ATOM_ENCODER_MODE_HDMI:
+ if (ASIC_IS_DCE4(rdev))
+ radeon_crtc->ss_enabled =
+ radeon_atombios_get_asic_ss_info(rdev,
+ &radeon_crtc->ss,
+ ASIC_INTERNAL_SS_ON_HDMI,
+ mode->clock / 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* adjust pixel clock as needed */
+ radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+ return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ pll = &rdev->clock.p1pll;
+ break;
+ case ATOM_PPLL2:
+ pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ default:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
+
+ /* update pll params */
+ pll->flags = radeon_crtc->pll_flags;
+ pll->reference_div = radeon_crtc->pll_reference_div;
+ pll->post_div = radeon_crtc->pll_post_div;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ /* TV seems to prefer the legacy algo on some boards */
+ radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
+ else if (ASIC_IS_AVIVO(rdev))
+ radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
+ else
+ radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div, &ref_div, &post_div);
+
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+ radeon_crtc->crtc_id, &radeon_crtc->ss);
+
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ encoder_mode, radeon_encoder->encoder_id, mode->clock,
+ ref_div, fb_div, frac_fb_div, post_div,
+ radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
+
+ if (radeon_crtc->ss_enabled) {
+ /* calculate ss amount and step size */
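+ /* The feedback divider is carried in tenths here
+ * (fb_div * 10 + frac_fb_div), and ss.percentage is presumably
+ * in 0.01% units, so dividing by 10000 yields the spread amount
+ * in tenths of a feedback-divider step; it is then split into
+ * the integer and fractional fields the hardware expects.
+ */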
+ if (ASIC_IS_DCE4(rdev)) {
+ u32 step_size;
+ u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+ radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+ radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+ if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+ step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ else
+ step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+ (125 * 25 * pll->reference_freq / 100);
+ radeon_crtc->ss.step = step_size;
+ }
+
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+ radeon_crtc->crtc_id, &radeon_crtc->ss);
+ }
+}
+
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ unsigned bankw, bankh, mtaspect, tile_split;
+ u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 tmp, viewport_w, viewport_h;
+ int r;
+
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
+
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ if (atomic) {
+ fb_location = radeon_bo_gpu_offset(rbo);
+ } else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ }
+
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (target_fb->bits_per_pixel) {
+ case 8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case 15:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ break;
+ case 16:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case 24:
+ case 32:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ target_fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+ fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+ fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+ fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ } else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+ else if (rdev->family == CHIP_VERDE)
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ break;
+ case 1:
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ break;
+ case 2:
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ break;
+ case 3:
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ break;
+ case 4:
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ break;
+ case 5:
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+ WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
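+ /* the GRPH pitch register counts pixels, while the drm framebuffer
+ * pitch is in bytes
+ */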
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ target_fb->height);
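+ /* align the viewport start: x down to 4 pixels, y down to 2 lines */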
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (viewport_w << 16) | viewport_h);
+
+ /* pageflip setup */
+ /* make sure the flip happens during vblank rather than hblank */
+ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ struct drm_framebuffer *target_fb;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
+ u32 tmp, viewport_w, viewport_h;
+ int r;
+
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
+
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ if (atomic) {
+ fb_location = radeon_bo_gpu_offset(rbo);
+ } else {
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (target_fb->bits_per_pixel) {
+ case 8:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
+ AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
+ break;
+ case 15:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+ break;
+ case 16:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+ break;
+ case 24:
+ case 32:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+ AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ target_fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (rdev->family >= CHIP_R600) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+ } else {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
+ if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= AVIVO_D1GRPH_TILED;
+ }
+
+ if (radeon_crtc->crtc_id == 0)
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ else
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id) {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ } else {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ }
+ }
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location);
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+ radeon_crtc->crtc_offset, (u32) fb_location);
+ WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+ WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ target_fb->height);
+ x &= ~3;
+ y &= ~1;
+ WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+ WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (viewport_w << 16) | viewport_h);
+
+ /* pageflip setup */
+ /* make sure the flip happens during vblank rather than hblank */
+ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev))
+ return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ else if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ else
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev))
+ return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
+ else if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+ else
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ u32 disp_merge_cntl;
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ break;
+ case 1:
+ disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+ break;
+ }
+}
+
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+ u32 pll_in_use = 0;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ pll_in_use |= (1 << test_radeon_crtc->pll_id);
+ }
+ return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode. For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->encoder &&
+ ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* for DP use the same PLL for all */
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ return test_radeon_crtc->pll_id;
+ }
+ }
+ return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *test_crtc;
+ struct radeon_crtc *test_radeon_crtc;
+ u32 adjusted_clock, test_adjusted_clock;
+
+ adjusted_clock = radeon_crtc->adjusted_clock;
+
+ if (adjusted_clock == 0)
+ return ATOM_PPLL_INVALID;
+
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc == test_crtc)
+ continue;
+ test_radeon_crtc = to_radeon_crtc(test_crtc);
+ if (test_radeon_crtc->encoder &&
+ !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* check if we are already driving this connector with another crtc */
+ if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ /* if we are, return that pll */
+ if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ return test_radeon_crtc->pll_id;
+ }
+ /* for non-DP check the clock */
+ test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+ if ((crtc->mode.clock == test_crtc->mode.clock) &&
+ (adjusted_clock == test_adjusted_clock) &&
+ (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+ return test_radeon_crtc->pll_id;
+ }
+ }
+ return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ u32 pll_in_use;
+ int pll;
+
+ if (ASIC_IS_DCE61(rdev)) {
+ struct radeon_encoder_atom_dig *dig =
+ radeon_encoder->enc_priv;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+ !dig->linkb)
+ /* UNIPHY A uses PPLL2 */
+ return ATOM_PPLL2;
+ else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ /* UNIPHY B/C/D/E/F */
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ /* UNIPHY B/C/D/E/F */
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL0)))
+ return ATOM_PPLL0;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+ * DCE5: PPLL, DCPLL, or ext clock
+ * DCE6: PPLL, PPLL0, or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+ * crtc virtual pixel clock.
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE6(rdev))
+ /* use PPLL0 for all DP */
+ return ATOM_PPLL0;
+ else if (ASIC_IS_DCE5(rdev))
+ /* use DCPLL for all DP */
+ return ATOM_DCPLL;
+ else {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ /* all other cases */
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else {
+ /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ /* Some atombios code (observed on some DCE2/DCE3 parts) has a bug:
+ * the matching between pll and crtc is done through
+ * PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the atombios code uses the
+ * pll id (1 or 2) to select which register to write. I.e. with pll1
+ * it writes PCLK_CRTC1_CNTL (0x480) and with pll2 PCLK_CRTC2_CNTL
+ * (0x484), and it then uses the crtc id to choose which value to
+ * write, which is the reverse of the register logic. So the only
+ * cases that work are when the pll id is the same as the crtc id, or
+ * when both the pll and the crtc are enabled and use the same clock.
+ *
+ * So just return the crtc id as if crtc and pll were hard linked
+ * together, even if they aren't.
+ */
+ return radeon_crtc->crtc_id;
+ }
+}
+
+void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
+{
+ /* always set DCPLL */
+ if (ASIC_IS_DCE6(rdev))
+ atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+ else if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_atom_ss ss;
+ bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DCPLL,
+ rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
+ }
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder =
+ to_radeon_encoder(radeon_crtc->encoder);
+ bool is_tvcv = false;
+
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ is_tvcv = true;
+
+ atombios_crtc_set_pll(crtc, adjusted_mode);
+
+ if (ASIC_IS_DCE4(rdev))
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ else if (ASIC_IS_AVIVO(rdev)) {
+ if (is_tvcv)
+ atombios_crtc_set_timing(crtc, adjusted_mode);
+ else
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ } else {
+ atombios_crtc_set_timing(crtc, adjusted_mode);
+ if (radeon_crtc->crtc_id == 0)
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ radeon_legacy_atom_fixup(crtc);
+ }
+ atombios_crtc_set_base(crtc, x, y, old_fb);
+ atombios_overscan_setup(crtc, mode, adjusted_mode);
+ atombios_scaler_setup(crtc);
+ return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the radeon crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_crtc->encoder = encoder;
+ radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
+ return false;
+ }
+ if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ return false;
+ if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+ return false;
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+ /* if we can't get a PPLL for a non-DP encoder, fail */
+ if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+ !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+ return false;
+
+ return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* disable crtc pair power gating before programming */
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
+ atombios_lock_crtc(crtc, ATOM_ENABLE);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ atombios_lock_crtc(crtc, ATOM_DISABLE);
+}
+
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_atom_ss ss;
+ int i;
+
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+ rdev->mode_info.crtcs[i]->enabled &&
+ i != radeon_crtc->crtc_id &&
+ radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+ /* another crtc is using this pll; don't turn
+ * the pll off
+ */
+ goto done;
+ }
+ }
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ case ATOM_PPLL0:
+ /* disable the ppll */
+ if (ASIC_IS_DCE61(rdev))
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ default:
+ break;
+ }
+done:
+ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ radeon_crtc->adjusted_clock = 0;
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+ .dpms = atombios_crtc_dpms,
+ .mode_fixup = atombios_crtc_mode_fixup,
+ .mode_set = atombios_crtc_mode_set,
+ .mode_set_base = atombios_crtc_set_base,
+ .mode_set_base_atomic = atombios_crtc_set_base_atomic,
+ .prepare = atombios_crtc_prepare,
+ .commit = atombios_crtc_commit,
+ .load_lut = radeon_crtc_load_lut,
+ .disable = atombios_crtc_disable,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ default:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ break;
+ }
+ } else {
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ radeon_crtc->adjusted_clock = 0;
+ radeon_crtc->encoder = NULL;
+ radeon_crtc->connector = NULL;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 0000000..1602398
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,925 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include <drm/drm_dp_helper.h>
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
+
+static const char * const voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static const char * const pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
+union aux_channel_transaction {
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
+
+static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+ u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u8 delay, u8 *ack)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union aux_channel_transaction args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+ unsigned char *base;
+ int recv_bytes;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
+
+ radeon_atom_copy_swap(base, send, send_bytes, true);
+
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
+ args.v1.ucDataOutLen = 0;
+ args.v1.ucChannelID = chan->rec.i2c_id;
+ args.v1.ucDelay = delay / 10;
+ if (ASIC_IS_DCE4(rdev))
+ args.v2.ucHPD_ID = chan->rec.hpd;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *ack = args.v1.ucReplyStatus;
+
+ /* timeout */
+ if (args.v1.ucReplyStatus == 1) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* flags not zero */
+ if (args.v1.ucReplyStatus == 2) {
+ DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+ return -EBUSY;
+ }
+
+ /* error */
+ if (args.v1.ucReplyStatus == 3) {
+ DRM_DEBUG_KMS("dp_aux_ch error\n");
+ return -EIO;
+ }
+
+ recv_bytes = args.v1.ucDataOutLen;
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ if (recv && recv_size)
+ radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
+
+ return recv_bytes;
+}
+
+static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
+ u16 address, u8 *send, u8 send_bytes, u8 delay)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int ret;
+ u8 msg[20];
+ int msg_bytes = send_bytes + 4;
+ u8 ack;
+ unsigned retry;
+
+ /* an AUX transaction can carry at most 16 data bytes */
+ if (send_bytes > 16)
+ return -EINVAL;
+
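+ /* Build the request in the layout the atom table expects: the 16-bit
+ * address little endian, the AUX command in the high nibble of byte 2,
+ * and byte 3 packing the total message length (high nibble) together
+ * with the DP-style payload-length-minus-one (low nibble).
+ */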
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_WRITE << 4;
+ msg[3] = (msg_bytes << 4) | (send_bytes - 1);
+ memcpy(&msg[4], send, send_bytes);
+
+ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+ msg, msg_bytes, NULL, 0, delay, &ack);
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ return send_bytes;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(400);
+ else
+ return -EIO;
+ }
+
+ return -EIO;
+}
+
+static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
+ u16 address, u8 *recv, int recv_bytes, u8 delay)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[4];
+ int msg_bytes = 4;
+ u8 ack;
+ int ret;
+ unsigned retry;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_READ << 4;
+ msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
+
+ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+ msg, msg_bytes, recv, recv_bytes, delay, &ack);
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ return ret;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(400);
+ else if (ret == 0)
+ return -EPROTO;
+ else
+ return -EIO;
+ }
+
+ return -EIO;
+}
+
+static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
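+ /* bits [7:4] of tile_config encode the number of banks */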
+ u16 reg, u8 val)
+{
+ radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
+}
+
+static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
+ u16 reg)
+{
+ u8 val = 0;
+
+ radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
+
+ return val;
+}
+
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ u8 write_byte, u8 *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+ u16 address = algo_data->address;
+ u8 msg[5];
+ u8 reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes = 1;
+ int ret;
+ u8 ack;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[2] = AUX_I2C_READ << 4;
+ else
+ msg[2] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[2] |= AUX_I2C_MOT << 4;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg_bytes = 5;
+ msg[3] = msg_bytes << 4;
+ msg[4] = write_byte;
+ break;
+ case MODE_I2C_READ:
+ msg_bytes = 4;
+ msg[3] = msg_bytes << 4;
+ break;
+ default:
+ msg_bytes = 4;
+ msg[3] = 3 << 4;
+ break;
+ }
+
+ for (retry = 0; retry < 4; retry++) {
+ ret = radeon_process_aux_ch(auxch,
+ msg, msg_bytes, reply, reply_bytes, 0, &ack);
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch (ack & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_ch native defer\n");
+ udelay(400);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
+ return -EREMOTEIO;
+ }
+
+ switch (ack & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ)
+ *read_byte = reply[0];
+ return ret;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ udelay(400);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
+ return -EREMOTEIO;
+ }
+ }
+
+ DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
+ return -EREMOTEIO;
+}
+
+/***** general DP utility functions *****/
+
+#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ u8 train_set[4])
+{
+ u8 v = 0;
+ u8 p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ lane,
+ voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= DP_VOLTAGE_MAX)
+ v |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= DP_PRE_EMPHASIS_MAX)
+ p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
+ voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+/* convert bits per color (bpc, as reported by the EDID) to bits per pixel */
+static int convert_bpc_to_bpp(int bpc)
+{
+ if (bpc == 0)
+ return 24;
+ else
+ return bpc * 3;
+}
+
+/* get the max pix clock supported by the link rate and lane num */
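+/* Each lane carries link_rate (in kHz) 10-bit symbols per second, of which
+ * 8 bits are data after 8b/10b decoding; the usable bandwidth is therefore
+ * link_rate * lane_num * 8 bits, and dividing by the bits per pixel gives
+ * the maximum pixel clock in kHz.
+ */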
+static int dp_get_max_dp_pix_clock(int link_rate,
+ int lane_num,
+ int bpp)
+{
+ return (link_rate * lane_num * 8) / bpp;
+}
+
+/***** radeon specific DP functions *****/
+
+/* Find the minimum lane count that can carry the pixel clock at the sink's
+ * maximum link rate, capped at the maximum lane count the DP panel supports;
+ * radeon_dp_get_dp_link_clock() then picks the lowest link rate that still
+ * fits at that lane count.
+ */
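+/* For example, assuming a 148.5 MHz mode at 24 bpp on a 4-lane HBR panel:
+ * one lane at 270000 tops out at 270000 * 8 / 24 = 90000 kHz and two lanes
+ * at 180000 kHz, so 2 lanes are chosen; radeon_dp_get_dp_link_clock() then
+ * settles on 270000, since 162000 only carries 108000 kHz on 2 lanes.
+ */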
+static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE],
+ int pix_clock)
+{
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+ int max_link_rate = drm_dp_max_link_rate(dpcd);
+ int max_lane_num = drm_dp_max_lane_count(dpcd);
+ int lane_num;
+ int max_dp_pix_clock;
+
+ for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+ max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+ if (pix_clock <= max_dp_pix_clock)
+ break;
+ }
+
+ return lane_num;
+}
+
+static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+ u8 dpcd[DP_DPCD_SIZE],
+ int pix_clock)
+{
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+ int lane_num, max_pix_clock;
+
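+ /* Nutmeg is a DP-to-VGA bridge chip that appears to always run its
+ * DP input link at the fixed HBR (270000) rate
+ */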
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG)
+ return 270000;
+
+ lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+ max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+ if (pix_clock <= max_pix_clock)
+ return 162000;
+ max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+ if (pix_clock <= max_pix_clock)
+ return 270000;
+ if (radeon_connector_is_dp12_capable(connector)) {
+ max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+ if (pix_clock <= max_pix_clock)
+ return 540000;
+ }
+
+ return drm_dp_max_link_rate(dpcd);
+}
+
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+ int action, int dp_clock,
+ u8 ucconfig, u8 lane_num)
+{
+ DP_ENCODER_SERVICE_PARAMETERS args;
+ int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+ memset(&args, 0, sizeof(args));
+ args.ucLinkClock = dp_clock / 10;
+ args.ucConfig = ucconfig;
+ args.ucAction = action;
+ args.ucLaneNum = lane_num;
+ args.ucStatus = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+ dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 buf[3];
+
+ if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0) > 0)
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0) > 0)
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[DP_DPCD_SIZE];
+ int ret;
+
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE, 0);
+ if (ret > 0) {
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+ DRM_DEBUG_KMS("DPCD: ");
+ for (i = 0; i < DP_DPCD_SIZE; i++)
+ DRM_DEBUG_KMS("%02x ", msg[i]);
+ DRM_DEBUG_KMS("\n");
+
+ radeon_dp_probe_oui(radeon_connector);
+
+ return true;
+ }
+ dig_connector->dpcd[0] = 0;
+ return false;
+}
+
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+ u8 tmp;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return panel_mode;
+
+ if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+ /* DP bridge chips */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+ (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+ else
+ panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ /* eDP */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ }
+
+ return panel_mode;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+ dig_connector->dp_clock =
+ radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+ dig_connector->dp_lane_count =
+ radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+ }
+}
+
+int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ int dp_clock;
+
+ if (!radeon_connector->con_priv)
+ return MODE_CLOCK_HIGH;
+ dig_connector = radeon_connector->con_priv;
+
+ dp_clock =
+ radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+
+ if ((dp_clock == 540000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE, 100);
+ if (ret <= 0)
+ return false;
+
+ DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+ return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (!radeon_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ return false;
+ return true;
+}
+
+struct radeon_dp_link_train_info {
+ struct radeon_device *rdev;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ int enc_id;
+ int dp_clock;
+ int dp_lane_count;
+ bool tp3_supported;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 train_set[4];
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 tries;
+ bool use_dpencoder;
+};
+
+static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
+{
+ /* set the initial vs/emph on the source */
+ atombios_dig_transmitter_setup(dp_info->encoder,
+ ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+ 0, dp_info->train_set[0]); /* sets all lanes at once */
+
+ /* set the vs/emph on the sink */
+ radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
+ dp_info->train_set, dp_info->dp_lane_count, 0);
+}
+
+static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
+{
+ int rtp = 0;
+
+ /* set training pattern on the source */
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
+ switch (tp) {
+ case DP_TRAINING_PATTERN_1:
+ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+ break;
+ }
+ atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
+ } else {
+ switch (tp) {
+ case DP_TRAINING_PATTERN_1:
+ rtp = 0;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ rtp = 1;
+ break;
+ }
+ radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dp_info->dp_clock, dp_info->enc_id, rtp);
+ }
+
+ /* enable training pattern on the sink */
+ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
+}
+
+static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 tmp;
+
+ /* power up the sink */
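+ /* DPCD 1.1 added the SET_POWER register, hence the revision check */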
+ if (dp_info->dpcd[0] >= 0x11)
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_SET_POWER, DP_SET_POWER_D0);
+
+ /* possibly enable downspread on the sink */
+ if (dp_info->dpcd[3] & 0x1)
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+ else
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_DOWNSPREAD_CTRL, 0);
+
+ if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+ (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+ }
+
+ /* set the lane count on the sink */
+ tmp = dp_info->dp_lane_count;
+ if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
+ dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
+
+ /* set the link rate on the sink */
+ tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
+ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
+
+ /* start training on the source */
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+ atombios_dig_encoder_setup(dp_info->encoder,
+ ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+ else
+ radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
+ dp_info->dp_clock, dp_info->enc_id, 0);
+
+ /* disable the training pattern on the sink */
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ return 0;
+}
+
+static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
+{
+ udelay(400);
+
+ /* disable the training pattern on the sink */
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ /* disable the training pattern on the source */
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+ atombios_dig_encoder_setup(dp_info->encoder,
+ ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
+ else
+ radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dp_info->dp_clock, dp_info->enc_id, 0);
+
+ return 0;
+}
+
+static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+{
+ bool clock_recovery;
+ u8 voltage;
+ int i;
+
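+ /* clock recovery uses training pattern 1, conventionally starting
+ * from the minimum voltage swing and pre-emphasis
+ */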
+ radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+ memset(dp_info->train_set, 0, 4);
+ radeon_dp_update_vs_emph(dp_info);
+
+ udelay(400);
+
+ /* clock recovery loop */
+ clock_recovery = false;
+ dp_info->tries = 0;
+ voltage = 0xff;
+ while (1) {
+ drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
+ break;
+ }
+
+ if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ for (i = 0; i < dp_info->dp_lane_count; i++) {
+ if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ }
+ if (i == dp_info->dp_lane_count) {
+ DRM_ERROR("clock recovery reached max voltage\n");
+ break;
+ }
+
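+ /* if the sink keeps requesting the same voltage swing, give up
+ * after 5 attempts, as the DP spec suggests
+ */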
+ if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++dp_info->tries;
+ if (dp_info->tries == 5) {
+ DRM_ERROR("clock recovery tried 5 times\n");
+ break;
+ }
+ } else
+ dp_info->tries = 0;
+
+ voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+ radeon_dp_update_vs_emph(dp_info);
+ }
+ if (!clock_recovery) {
+ DRM_ERROR("clock recovery failed\n");
+ return -1;
+ } else {
+ DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
+ dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ return 0;
+ }
+}
+
+static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+{
+ bool channel_eq;
+
+ if (dp_info->tp3_supported)
+ radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
+ else
+ radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
+
+ /* channel equalization loop */
+ dp_info->tries = 0;
+ channel_eq = false;
+ while (1) {
+ drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* give up after more than 5 tries */
+ if (dp_info->tries > 5) {
+ DRM_ERROR("channel eq failed: 5 tries\n");
+ break;
+ }
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+ radeon_dp_update_vs_emph(dp_info);
+ dp_info->tries++;
+ }
+
+ if (!channel_eq) {
+ DRM_ERROR("channel eq failed\n");
+ return -1;
+ } else {
+ DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
+ dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ return 0;
+ }
+}
+
+void radeon_dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ struct radeon_dp_link_train_info dp_info;
+ int index;
+ u8 tmp, frev, crev;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+ dig = radeon_encoder->enc_priv;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+ (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+ return;
+
+ /* DPEncoderService newer than 1.1 can't properly program the
+ * training pattern; for those versions use DIGxEncoderControl
+ * (x == 1 | 2) instead.
+ */
+ dp_info.use_dpencoder = true;
+ index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+ if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+ if (crev > 1)
+ dp_info.use_dpencoder = false;
+ }
+
+ dp_info.enc_id = 0;
+ if (dig->dig_encoder)
+ dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig->linkb)
+ dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
+
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
+ if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+ dp_info.tp3_supported = true;
+ else
+ dp_info.tp3_supported = false;
+
+ memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
+ dp_info.rdev = rdev;
+ dp_info.encoder = encoder;
+ dp_info.connector = connector;
+ dp_info.radeon_connector = radeon_connector;
+ dp_info.dp_lane_count = dig_connector->dp_lane_count;
+ dp_info.dp_clock = dig_connector->dp_clock;
+
+ if (radeon_dp_link_train_init(&dp_info))
+ goto done;
+ if (radeon_dp_link_train_cr(&dp_info))
+ goto done;
+ if (radeon_dp_link_train_ce(&dp_info))
+ goto done;
+done:
+ radeon_dp_link_train_finish(&dp_info);
+}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 0000000..4c81e9f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2673 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+#include <linux/backlight.h>
+
+extern int atom_debug;
+
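+/* The firmware mirrors the current backlight level in the BIOS_2 scratch
+ * register; these helpers read and write the ATOM_S2_CURRENT_BL_LEVEL
+ * field there.
+ */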
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+ u8 backlight_level;
+ u32 bios_2_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+ u8 backlight_level)
+{
+ u32 bios_2_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+ ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ else
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return 0;
+
+ return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+ struct drm_encoder *encoder = &radeon_encoder->base;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_atom_dig *dig;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ radeon_encoder->enc_priv) {
+ dig = radeon_encoder->enc_priv;
+ dig->backlight_level = level;
+ radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ if (dig->backlight_level == 0) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ } else {
+ args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->backlight_level == 0)
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ else {
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+ u8 level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+ level = RADEON_MAX_BL_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+ atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+ return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+ .get_brightness = radeon_atom_backlight_get_brightness,
+ .update_status = radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct radeon_backlight_privdata *pdata;
+ struct radeon_encoder_atom_dig *dig;
+ char bl_name[16];
+
+ /* Mac laptops with multiple GPUs use the gmux driver for backlight
+ * control, so don't register a backlight device for those.
+ */
+ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+ (rdev->pdev->device == 0x6741))
+ return;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (!rdev->is_atom_bios)
+ return;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto error;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = RADEON_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ pdata, &radeon_atom_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto error;
+ }
+
+ pdata->encoder = radeon_encoder;
+
+ dig = radeon_encoder->enc_priv;
+ dig->bl_dev = bd;
+
+ bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ DRM_INFO("radeon atom DIG backlight initialized\n");
+
+ return;
+
+error:
+ kfree(pdata);
+ return;
+}
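+
+/* Once registered, the device appears under
+ * /sys/class/backlight/radeon_bl<n>/ and userspace can drive it through
+ * the standard backlight class interface, e.g.
+ *   echo 128 > /sys/class/backlight/radeon_bl0/brightness
+ */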
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd = NULL;
+ struct radeon_encoder_atom_dig *dig;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (!rdev->is_atom_bios)
+ return;
+
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+ return;
+
+ dig = radeon_encoder->enc_priv;
+ bd = dig->bl_dev;
+ dig->bl_dev = NULL;
+
+ if (bd) {
+ struct radeon_backlight_privdata *pdata;
+
+ pdata = bl_get_data(bd);
+ backlight_device_unregister(bd);
+ kfree(pdata);
+
+ DRM_INFO("radeon atom DIG backlight unloaded\n");
+ }
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *encoder,
+ struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode);
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* hw bug */
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+ /* get the native mode for TV */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ if (tv_dac) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+ else
+ radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+ }
+ }
+
+ if (ASIC_IS_DCE3(rdev) &&
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, adjusted_mode);
+ }
+
+ return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+ break;
+ }
+
+ args.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_PS2;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.ucDacStandard = ATOM_DAC1_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_PAL:
+ case TV_STD_PAL_M:
+ case TV_STD_SCART_PAL:
+ case TV_STD_SECAM:
+ case TV_STD_PAL_CN:
+ args.ucDacStandard = ATOM_DAC1_PAL;
+ break;
+ case TV_STD_NTSC:
+ case TV_STD_NTSC_J:
+ case TV_STD_PAL_60:
+ default:
+ args.ucDacStandard = ATOM_DAC1_NTSC;
+ break;
+ }
+ }
+ args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ TV_ENCODER_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+ struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+ args.sTVEncoder.ucAction = action;
+
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+ else {
+ switch (dac_info->tv_std) {
+ case TV_STD_NTSC:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ case TV_STD_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+ break;
+ case TV_STD_PAL_M:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+ break;
+ case TV_STD_PAL_60:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+ break;
+ case TV_STD_NTSC_J:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+ break;
+ case TV_STD_SCART_PAL:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+ break;
+ case TV_STD_SECAM:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+ break;
+ case TV_STD_PAL_CN:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+ break;
+ default:
+ args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+ break;
+ }
+ }
+
+ args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int bpc = 8;
+
+ if (connector)
+ bpc = radeon_get_monitor_bpc(connector);
+
+ switch (bpc) {
+ case 0:
+ return PANEL_BPC_UNDEFINE;
+ case 6:
+ return PANEL_6BIT_PER_COLOR;
+ case 8:
+ default:
+ return PANEL_8BIT_PER_COLOR;
+ case 10:
+ return PANEL_10BIT_PER_COLOR;
+ case 12:
+ return PANEL_12BIT_PER_COLOR;
+ case 16:
+ return PANEL_16BIT_PER_COLOR;
+ }
+}
+
+union dvo_encoder_control {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union dvo_encoder_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ /* some R4xx chips have the wrong frev */
+ if (rdev->family <= CHIP_RV410)
+ frev = 1;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ /* R4xx, R5xx */
+ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ break;
+ case 2:
+ /* RS600/690/740 */
+ args.dvo.sDVOEncoder.ucAction = action;
+ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ /* DFP1, CRT1, TV1 depending on the type of port */
+ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ break;
+ case 3:
+ /* R6xx */
+ args.dvo_v3.ucAction = action;
+ args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.dvo_v3.ucDVOConfig = 0; /* XXX */
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union lvds_encoder_control {
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
+ LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ union lvds_encoder_control args;
+ int index = 0;
+ int hdmi_detected = 0;
+ uint8_t frev, crev;
+
+ if (!dig)
+ return;
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ hdmi_detected = 1;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ case 2:
+ switch (crev) {
+ case 1:
+ args.v1.ucMisc = 0;
+ args.v1.ucAction = action;
+ if (hdmi_detected)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ } else {
+ if (dig->linkb)
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8) */
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
+ break;
+ case 2:
+ case 3:
+ args.v2.ucMisc = 0;
+ args.v2.ucAction = action;
+ if (crev == 3) {
+ if (dig->coherent_mode)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+ }
+ if (hdmi_detected)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v2.ucTruncate = 0;
+ args.v2.ucSpatial = 0;
+ args.v2.ucTemporal = 0;
+ args.v2.ucFRC = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+ args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+ }
+ if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+ if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+ if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+ }
+ } else {
+ if (dig->linkb)
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
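+/* Pick the ATOM encoder mode from the connector type and the sink:
+ * DP bridges and (e)DP sinks get DP, HDMI-capable monitors get HDMI when
+ * audio is enabled, and the rest fall back to DVI/CRT/LVDS/TV.
+ */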
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ /* dp bridges are always DP */
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+ return ATOM_ENCODER_MODE_DP;
+
+ /* DVO is always DVO */
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+ return ATOM_ENCODER_MODE_DVO;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ default:
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else
+ return ATOM_ENCODER_MODE_DVI;
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ return ATOM_ENCODER_MODE_DP;
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
+ return ATOM_ENCODER_MODE_CRT;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ /* fix me */
+ return ATOM_ENCODER_MODE_TV;
+ /*return ATOM_ENCODER_MODE_CV;*/
+ break;
+ }
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0/6.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * llano
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ * ontario
+ * DIG1 drives UNIPHY0/1/2 link A
+ * DIG2 drives UNIPHY0/1/2 link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
+
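+/* The hardcoded DCE 4.0/5.0/6.0 mapping above follows a simple pattern;
+ * as an illustrative sketch only (not code the driver uses), with a
+ * 0-based DIG index n:
+ *   uniphy = n / 2;            DIG1/2 -> UNIPHY0, DIG3/4 -> UNIPHY1, ...
+ *   link   = (n & 1) ? B : A;  odd DIGs drive link B
+ */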
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ union dig_encoder_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
+ return;
+
+ memset(&args, 0, sizeof(args));
+
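+ /* DCE4+ exposes a single DIGxEncoderControl table; older families have
+ * a separate control table per DIG block
+ */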
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v3.ucPanelMode = panel_mode;
+ else
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ args.v1.ucLaneNum = dp_lane_count;
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v1.ucLaneNum = 8;
+ else
+ args.v1.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+ break;
+ }
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ break;
+ case 2:
+ case 3:
+ args.v3.ucAction = action;
+ args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v3.ucPanelMode = panel_mode;
+ else
+ args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
+ args.v3.ucLaneNum = dp_lane_count;
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
+ break;
+ case 4:
+ args.v4.ucAction = action;
+ args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+ args.v4.ucPanelMode = panel_mode;
+ else
+ args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
+ args.v4.ucLaneNum = dp_lane_count;
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v4.ucLaneNum = 8;
+ else
+ args.v4.ucLaneNum = 4;
+
+ if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union dig_transmitter_control {
+ DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ union dig_transmitter_control args;
+ int index = 0;
+ uint8_t frev, crev;
+ bool is_dp = false;
+ int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
+ int dig_encoder = dig->dig_encoder;
+ int hpd_id = RADEON_HPD_NONE;
+
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ /* just needed to avoid bailing in the encoder check. the encoder
+ * isn't used for init
+ */
+ dig_encoder = 0;
+ } else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ hpd_id = radeon_connector->hpd.hpd;
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+
+ /* no dig encoder assigned */
+ if (dig_encoder == -1)
+ return;
+
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+ is_dp = true;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
+ }
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
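+ /* each table revision lays out its arguments differently; fill in the
+ * union member matching the parsed frev/crev
+ */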
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ args.v1.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+ if (dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+ if (is_dp ||
+ !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
+ if (igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ } else {
+ if (igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+ }
+ }
+
+ if (dig->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+ }
+ break;
+ case 2:
+ args.v2.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v2.asMode.ucLaneSel = lane_num;
+ args.v2.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ args.v2.acConfig.ucEncoderSel = dig_encoder;
+ if (dig->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v2.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v2.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v2.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp) {
+ args.v2.acConfig.fCoherentMode = 1;
+ args.v2.acConfig.fDPConnector = 1;
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v2.acConfig.fCoherentMode = 1;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v2.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ case 3:
+ args.v3.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v3.asMode.ucLaneSel = lane_num;
+ args.v3.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ if (is_dp)
+ args.v3.ucLaneNum = dp_lane_count;
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig->linkb)
+ args.v3.acConfig.ucLinkSel = 1;
+ if (dig_encoder & 1)
+ args.v3.acConfig.ucEncoderSel = 1;
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ /* On DCE4, if there is an external clock, it generates the DP ref clock */
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v3.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ case 4:
+ args.v4.ucAction = action;
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v4.asMode.ucLaneSel = lane_num;
+ args.v4.asMode.ucLaneSet = lane_set;
+ } else {
+ if (is_dp)
+ args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
+
+ if (is_dp)
+ args.v4.ucLaneNum = dp_lane_count;
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v4.ucLaneNum = 8;
+ else
+ args.v4.ucLaneNum = 4;
+
+ if (dig->linkb)
+ args.v4.acConfig.ucLinkSel = 1;
+ if (dig_encoder & 1)
+ args.v4.acConfig.ucEncoderSel = 1;
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ /* On DCE5 DCPLL usually generates the DP ref clock */
+ if (is_dp) {
+ if (rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+ else
+ args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+ } else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v4.acConfig.ucTransmitterSel = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v4.acConfig.ucTransmitterSel = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v4.acConfig.ucTransmitterSel = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v4.acConfig.fCoherentMode = 1;
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v4.acConfig.fDualLinkConnector = 1;
+ }
+ break;
+ case 5:
+ args.v5.ucAction = action;
+ if (is_dp)
+ args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
+ else
+ args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+ else
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+ else
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+ else
+ args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+ break;
+ }
+ if (is_dp)
+ args.v5.ucLaneNum = dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v5.ucLaneNum = 8;
+ else
+ args.v5.ucLaneNum = 4;
+ args.v5.ucConnObjId = connector_object_id;
+ args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
+
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
+ else
+ args.v5.asConfig.ucPhyClkSrcId = pll_id;
+
+ if (is_dp)
+ args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v5.asConfig.ucCoherentMode = 1;
+ }
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v5.asConfig.ucHPDSel = 0;
+ else
+ args.v5.asConfig.ucHPDSel = hpd_id + 1;
+ args.v5.ucDigEncoderSel = 1 << dig_encoder;
+ args.v5.ucDPLaneSet = lane_set;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ break;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
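+/* Toggle eDP panel power through the UNIPHY transmitter table (DCE4+
+ * only). On power-on, poll HPD for up to 300 ms so the panel is actually
+ * up before link training starts.
+ */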
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union dig_transmitter_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ uint8_t frev, crev;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ goto done;
+
+ if (!ASIC_IS_DCE4(rdev))
+ goto done;
+
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+ goto done;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ goto done;
+
+ memset(&args, 0, sizeof(args));
+
+ args.v1.ucAction = action;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* wait for the panel to power up */
+ if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+ int i;
+
+ for (i = 0; i < 300; i++) {
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ return true;
+ mdelay(1);
+ }
+ return false;
+ }
+done:
+ return true;
+}
+
+union external_encoder_control {
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
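+/* Program an external encoder (e.g. a DP bridge chip) through the
+ * ExternalEncoderControl table; on v3, ext_enum selects which of up to
+ * three external encoders to address.
+ */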
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+ union external_encoder_control args;
+ struct drm_connector *connector;
+ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+ u8 frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ else
+ connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ /* no params on frev 1 */
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.sDigEncoder.ucAction = action;
+ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v1.sDigEncoder.ucLaneNum = 8;
+ else
+ args.v1.sDigEncoder.ucLaneNum = 4;
+ break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ ENABLE_YUV_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+ uint32_t temp, reg;
+
+ memset(&args, 0, sizeof(args));
+
+ if (rdev->family >= CHIP_R600)
+ reg = R600_BIOS_3_SCRATCH;
+ else
+ reg = RADEON_BIOS_3_SCRATCH;
+
+ /* XXX: fix up scratch reg handling */
+ temp = RREG32(reg);
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+ (radeon_crtc->crtc_id << 18)));
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+ else
+ WREG32(reg, 0);
+
+ if (enable)
+ args.ucEnable = ATOM_ENABLE;
+ args.ucCRTC = radeon_crtc->crtc_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ WREG32(reg, temp);
+}
+
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+ int index = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+ break;
+ default:
+ return;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ args.ucAction = ATOM_ENABLE;
+ /* workaround for DVOOutputControl on some RS690 systems */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+ u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg);
+ } else
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ args.ucAction = ATOM_DISABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ break;
+ }
+}
+
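+/* DIG DPMS: bring-up order matters -- set up the encoder, enable the
+ * transmitter, then (for DP) power the panel and link train; teardown
+ * runs in roughly the reverse order.
+ */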
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = NULL;
+ struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+
+ if (connector) {
+ radeon_connector = to_radeon_connector(connector);
+ radeon_dig_connector = radeon_connector->con_priv;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ if (!connector)
+ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ else
+ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_encoder_setup(encoder,
+ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ dig->panel_mode);
+ if (ext_encoder) {
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ }
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ } else {
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
+ radeon_dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ radeon_dig_connector->edp_on = false;
+ }
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ break;
+ }
+}
+
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+ break;
+ }
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+ radeon_encoder->active_device);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ radeon_atom_encoder_dpms_dig(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (ASIC_IS_DCE5(rdev)) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (ASIC_IS_DCE3(rdev))
+ radeon_atom_encoder_dpms_dig(encoder, mode);
+ else
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (ASIC_IS_DCE5(rdev)) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else
+ radeon_atom_encoder_dpms_avivo(encoder, mode);
+ break;
+ default:
+ return;
+ }
+
+ if (ext_encoder)
+ radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+union crtc_source_param {
+ SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ union crtc_source_param args;
+ int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ default:
+ if (ASIC_IS_AVIVO(rdev))
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ else {
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ } else {
+ args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+ }
+ }
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+ args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+ else
+ args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ }
+ break;
+ case 2:
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ } else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ switch (dig->dig_encoder) {
+ case 0:
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ }
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* update scratch regs with new routing */
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
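+/*
+ * SelectCRTC_Source is a versioned atom table: atom_parse_cmd_header()
+ * reports frev/crev, which select the parameter layout the BIOS expects
+ * (v1 routes by device index, v2 by DIG/DAC encoder id plus encode mode).
+ */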
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ /* Funky macbooks */
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+ lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+ WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+ }
+ }
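+ /* 0x106b is Apple's PCI subsystem vendor id; the device matched above
+ * is the Mobility Radeon X1600 found in early MacBook Pros, whose
+ * panel wants truncation and spatial dithering forced off.
+ */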
+
+ /* set scaler clears this on some chips */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ } else {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+ }
+ }
+}
+
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t dig_enc_in_use = 0;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* DCE6 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 */
+ if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
+ /* ontario follows DCE4 */
+ if (rdev->family == CHIP_PALM) {
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ } else
+ /* llano follows DCE3.2 */
+ return radeon_crtc->crtc_id;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ }
+ }
+ }
+
+ /* on DCE3.2 any encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
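+
+/*
+ * To summarize the mapping above: on DCE6 and most DCE4/5 parts each
+ * UNIPHY block owns a fixed pair of DIG encoders (UNIPHY -> 0/1,
+ * UNIPHY1 -> 2/3, UNIPHY2 -> 4/5, with link B taking the odd one);
+ * Llano and DCE3.2 simply reuse the crtc id; plain DCE3 arbitrates
+ * between DIG1 and DIG2, with LVTMA pinned to DIG2 (index 1).
+ */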
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ }
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+ else
+ atombios_yuv_setup(encoder, false);
+ }
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_ENABLE);
+ else
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ }
+
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, true);
+ if (rdev->asic->display.hdmi_setmode)
+ radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
+ }
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+ ATOM_DEVICE_CV_SUPPORT |
+ ATOM_DEVICE_CRT_SUPPORT)) {
+ DAC_LOAD_DETECTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+ uint8_t frev, crev;
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return false;
+
+ args.sDacload.ucMisc = 0;
+
+ if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+ args.sDacload.ucDacType = ATOM_DAC_A;
+ else
+ args.sDacload.ucDacType = ATOM_DAC_B;
+
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+ else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+ if (crev >= 3)
+ args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ return true;
+ } else
+ return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ uint32_t bios_0_scratch;
+
+ if (!atombios_dac_load_detect(encoder, connector)) {
+ DRM_DEBUG_KMS("detect returned false \n");
+ return connector_status_unknown;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ else
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+ u32 bios_0_scratch;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return connector_status_unknown;
+
+ if (!ext_encoder)
+ return connector_status_unknown;
+
+ if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+ return connector_status_unknown;
+
+ /* load detect on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+ return connector_status_connected;
+ }
+ if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+ return connector_status_connected; /* CTV */
+ else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+ return connector_status_connected; /* STV */
+ }
+ return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ if (ext_encoder)
+ /* ddc_setup on the dp bridge */
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if ((radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig) {
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+ if (rdev->family >= CHIP_R600)
+ dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+ else
+ /* RS600/690/740 have only 1 afmt block */
+ dig->afmt = rdev->mode_info.afmt[0];
+ }
+ }
+ }
+
+ radeon_atom_output_lock(encoder, true);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (radeon_connector->router.cd_valid)
+ radeon_router_select_cd_port(radeon_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+ /* need to call this here as we need the crtc set up */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+}
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
+disable_done:
+ if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, false);
+ }
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+ .dpms = radeon_atom_ext_dpms,
+ .mode_fixup = radeon_atom_ext_mode_fixup,
+ .prepare = radeon_atom_ext_prepare,
+ .mode_set = radeon_atom_ext_mode_set,
+ .commit = radeon_atom_ext_commit,
+ .disable = radeon_atom_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .disable = radeon_atom_encoder_disable,
+ .detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+ .dpms = radeon_atom_encoder_dpms,
+ .mode_fixup = radeon_atom_mode_fixup,
+ .prepare = radeon_atom_encoder_prepare,
+ .mode_set = radeon_atom_encoder_mode_set,
+ .commit = radeon_atom_encoder_commit,
+ .detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_atom_backlight_exit(radeon_encoder);
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+ if (!dac)
+ return NULL;
+
+ dac->tv_std = radeon_atombios_get_tv_info(rdev);
+ return dac;
+}
+
+static struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!dig)
+ return NULL;
+
+ /* coherent mode by default */
+ dig->coherent_mode = true;
+ dig->dig_encoder = -1;
+
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
+ return dig;
+}
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_enum == encoder_enum) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
+ switch (rdev->num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
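+
+ /* possible_crtcs is a bitmask with one bit per crtc this encoder
+ * may drive, e.g. 0x3f covers all six crtcs on DCE4-class parts.
+ */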
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->devices = supported_device;
+ radeon_encoder->rmx_type = RMX_OFF;
+ radeon_encoder->underscan_type = UNDERSCAN_OFF;
+ radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+ drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ radeon_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ }
+ drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ radeon_encoder->is_ext_encoder = true;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+ break;
+ }
+}
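+
+/*
+ * Callers pass encoder_enum straight from the atom object tables; the
+ * object id (which encoder block) and enum id (which link) are unpacked
+ * with OBJECT_ID_MASK above and ENUM_ID_MASK in
+ * radeon_atombios_set_dig_info().
+ */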
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
new file mode 100644
index 0000000..2ca389d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are a limitation of ProcessI2cChannelTransaction, not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_READ 255
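+
+/*
+ * The asymmetric limits stem from the table interface itself: outgoing
+ * bytes travel inline in the 16-bit lpI2CDataOut parameter, while read
+ * data comes back through the atom scratch buffer (see
+ * radeon_process_i2c_ch() below).
+ */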
+
+static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+ u8 slave_addr, u8 flags,
+ u8 *buf, u8 num)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+ unsigned char *base;
+ u16 out;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+ if (flags & HW_I2C_WRITE) {
+ if (num > ATOM_MAX_HW_I2C_WRITE) {
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ return -EINVAL;
+ }
+ memcpy(&out, buf, num);
+ args.lpI2CDataOut = cpu_to_le16(out);
+ } else {
+ if (num > ATOM_MAX_HW_I2C_READ) {
+ DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+ return -EINVAL;
+ }
+ }
+
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucRegIndex = 0;
+ args.ucTransBytes = num;
+ args.ucSlaveAddr = slave_addr << 1;
+ args.ucLineNumber = chan->rec.i2c_id;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* error */
+ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("hw_i2c error\n");
+ return -EIO;
+ }
+
+ if (!(flags & HW_I2C_WRITE))
+ radeon_atom_copy_swap(buf, base, num, false);
+
+ return 0;
+}
+
+int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct i2c_msg *p;
+ int i, remaining, current_count, buffer_offset, max_bytes, ret;
+ u8 buf = 0, flags;
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ ret = radeon_process_i2c_ch(i2c,
+ p->addr, HW_I2C_WRITE,
+ &buf, 1);
+ if (ret)
+ return ret;
+ else
+ return num;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ /* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
+ if (p->flags & I2C_M_RD) {
+ max_bytes = ATOM_MAX_HW_I2C_READ;
+ flags = HW_I2C_READ;
+ } else {
+ max_bytes = ATOM_MAX_HW_I2C_WRITE;
+ flags = HW_I2C_WRITE;
+ }
+ while (remaining) {
+ if (remaining > max_bytes)
+ current_count = max_bytes;
+ else
+ current_count = remaining;
+ ret = radeon_process_i2c_ch(i2c,
+ p->addr, flags,
+ &p->buf[buffer_offset], current_count);
+ if (ret)
+ return ret;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+
+ return num;
+}
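+
+/*
+ * Usage sketch (hypothetical caller; assumes an i2c_adapter whose
+ * .master_xfer points at radeon_atom_hw_i2c_xfer, as radeon's hw i2c
+ * channels are wired up): a single zero-length write acts as a bus
+ * probe, and anything longer is chunked by the loop above.
+ *
+ *	struct i2c_msg probe = { .addr = 0x50, .flags = 0, .len = 0 };
+ *	bool present = (i2c_transfer(&chan->adapter, &probe, 1) == 1);
+ */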
+
+u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
new file mode 100644
index 0000000..3c391e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef AVIVOD_H
+#define AVIVOD_H
+
+#define D1CRTC_CONTROL 0x6080
+#define CRTC_EN (1 << 0)
+#define D1CRTC_STATUS 0x609c
+#define D1CRTC_UPDATE_LOCK 0x60E8
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+
+#define D2CRTC_CONTROL 0x6880
+#define D2CRTC_STATUS 0x689c
+#define D2CRTC_UPDATE_LOCK 0x68E8
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
+
+#define D1VGA_CONTROL 0x0330
+#define DVGA_CONTROL_MODE_ENABLE (1 << 0)
+#define DVGA_CONTROL_TIMING_SELECT (1 << 8)
+#define DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
+#define DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+#define DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
+#define DVGA_CONTROL_ROTATE (1 << 24)
+#define D2VGA_CONTROL 0x0338
+
+#define VGA_HDP_CONTROL 0x328
+#define VGA_MEM_PAGE_SELECT_EN (1 << 0)
+#define VGA_MEMORY_DISABLE (1 << 4)
+#define VGA_RBBM_LOCK_DISABLE (1 << 8)
+#define VGA_SOFT_RESET (1 << 16)
+#define VGA_MEMORY_BASE_ADDRESS 0x0310
+#define VGA_RENDER_CONTROL 0x0300
+#define VGA_VSTATUS_CNTL_MASK 0x00030000
+
+#endif
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 0000000..19a0114
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
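+
+/*
+ * Encoding note (as used in the tables below): each group opens with a
+ * type-3 SET_CONTEXT_REG packet header, 0xc0nn6900, where nn is the
+ * dword count and 0x69 the PM4 opcode; the next dword is the register
+ * offset relative to the context-register base (reg - 0x28000, in
+ * dwords), followed by the register values themselves.
+ */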
+
+const u32 cayman_default_state[] =
+{
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0036900,
+ 0x0000000f,
+ 0x00000000, /* DB_DEPTH_INFO */
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000100, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0026900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0106900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+ 0x00000000, /* SPI_GPR_MGMT */
+ 0x00000000, /* SPI_LDS_MGMT */
+ 0x00000000, /* SPI_STACK_MGMT */
+ 0x00000000, /* SPI_WAVE_MGMT_1 */
+ 0x00000000, /* SPI_WAVE_MGMT_2 */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc00e6900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+ 0x00000000, /* DB_EQAA */
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000,
+
+ 0xc01b6900,
+ 0x000002f5,
+ 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+ 0xffffffff,
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+const u32 cayman_vs[] =
+{
+ 0x00000004,
+ 0x80400400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15000688,
+ 0x00000000,
+ 0x88000000,
+ 0x04000000,
+ 0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x00020000,
+#else
+ 0x00000000,
+#endif
+ 0x00000000,
+ 0x04000000,
+ 0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
+ 0x00000008,
+#endif
+ 0x00000000,
+};
+
+const u32 cayman_ps[] =
+{
+ 0x00000004,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95000688,
+ 0x00000000,
+ 0x88000000,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 0000000..f5d0e9a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_ps_size, cayman_vs_size;
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 0000000..8b6b0ba
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,5082 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "evergreend.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
+static const u32 crtc_offsets[6] =
+{
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl);
+
+static const u32 evergreen_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0x12030, 0xffffffff, 0x00000011,
+ 0x12c30, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00620,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+ 0x2f4c, 0xffffffff, 0x00000000,
+ 0x54f4, 0xffffffff, 0x00000000,
+ 0x54f0, 0xffffffff, 0x00000000,
+ 0x5498, 0xffffffff, 0x00000000,
+ 0x549c, 0xffffffff, 0x00000000,
+ 0x5494, 0xffffffff, 0x00000000,
+ 0x53cc, 0xffffffff, 0x00000000,
+ 0x53c8, 0xffffffff, 0x00000000,
+ 0x53c4, 0xffffffff, 0x00000000,
+ 0x53c0, 0xffffffff, 0x00000000,
+ 0x53bc, 0xffffffff, 0x00000000,
+ 0x53b8, 0xffffffff, 0x00000000,
+ 0x53b4, 0xffffffff, 0x00000000,
+ 0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0x40010000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00410,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9178, 0xffffffff, 0x00050000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00010004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00010004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00620,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x8c30, 0x0000000f, 0x00000005,
+ 0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000001,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00410,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x900c, 0xffffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76543210,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x00000007, 0x02011003,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00620,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003000, 0x00001070,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfff00000,
+ 0x9148, 0xffff0000, 0xfff00000,
+ 0x3f94, 0xffff0000, 0xfff00000,
+ 0x914c, 0xffff0000, 0xfff00000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010002,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010002,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x00010002,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x00010002,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003420, 0x00001450,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfffc0000,
+ 0x9148, 0xffff0000, 0xfffc0000,
+ 0x3f94, 0xffff0000, 0xfffc0000,
+ 0x914c, 0xffff0000, 0xfffc0000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010001,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02010001,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x02010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000001,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cypress_mgcg_init,
+ (const u32)ARRAY_SIZE(cypress_mgcg_init));
+ break;
+ case CHIP_JUNIPER:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ juniper_mgcg_init,
+ (const u32)ARRAY_SIZE(juniper_mgcg_init));
+ break;
+ case CHIP_REDWOOD:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ redwood_mgcg_init,
+ (const u32)ARRAY_SIZE(redwood_mgcg_init));
+ break;
+ case CHIP_CEDAR:
+ radeon_program_register_sequence(rdev,
+ cedar_golden_registers,
+ (const u32)ARRAY_SIZE(cedar_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cedar_mgcg_init,
+ (const u32)ARRAY_SIZE(cedar_mgcg_init));
+ break;
+ case CHIP_PALM:
+ radeon_program_register_sequence(rdev,
+ wrestler_golden_registers,
+ (const u32)ARRAY_SIZE(wrestler_golden_registers));
+ break;
+ case CHIP_SUMO:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ break;
+ case CHIP_SUMO2:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ radeon_program_register_sequence(rdev,
+ sumo_golden_registers,
+ (const u32)ARRAY_SIZE(sumo_golden_registers));
+ break;
+ case CHIP_BARTS:
+ radeon_program_register_sequence(rdev,
+ barts_golden_registers,
+ (const u32)ARRAY_SIZE(barts_golden_registers));
+ break;
+ case CHIP_TURKS:
+ radeon_program_register_sequence(rdev,
+ turks_golden_registers,
+ (const u32)ARRAY_SIZE(turks_golden_registers));
+ break;
+ case CHIP_CAICOS:
+ radeon_program_register_sequence(rdev,
+ caicos_golden_registers,
+ (const u32)ARRAY_SIZE(caicos_golden_registers));
+ break;
+ default:
+ break;
+ }
+}
+
+void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+ unsigned *bankh, unsigned *mtaspect,
+ unsigned *tile_split)
+{
+ *bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+ *bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+ *mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ *tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+ switch (*bankw) {
+ default:
+ case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
+ case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
+ case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
+ case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
+ }
+ switch (*bankh) {
+ default:
+ case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
+ case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
+ case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
+ case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
+ }
+ switch (*mtaspect) {
+ default:
+ case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
+ case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
+ case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
+ case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
+ }
+}
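
The switches above remap the raw 1/2/4/8 field values onto the EVERGREEN_ADDR_SURF_* encodings after unpacking them from the packed tiling flags. For illustration, a minimal sketch of the inverse packing, assuming the same RADEON_TILING_EG_* shift/mask macros used above; the helper itself is hypothetical and not part of this patch:

static unsigned evergreen_pack_tiling_fields(unsigned bankw, unsigned bankh,
                                             unsigned mtaspect, unsigned tile_split)
{
        unsigned flags = 0;

        /* each raw field sits at its shift, masked to its width */
        flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) << RADEON_TILING_EG_BANKW_SHIFT;
        flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) << RADEON_TILING_EG_BANKH_SHIFT;
        flags |= (mtaspect & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
                 RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
        flags |= (tile_split & RADEON_TILING_EG_TILE_SPLIT_MASK) <<
                 RADEON_TILING_EG_TILE_SPLIT_SHIFT;
        return flags;
}

Note this packs the raw 1/2/4/8 values, i.e. what the unpacking code sees before the switch statements remap them.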
+
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+ u32 cntl_reg, u32 status_reg)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ clock, false, &dividers);
+ if (r)
+ return r;
+
+ WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32(status_reg) & DCLK_STATUS)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
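
The loop above is the driver's usual poll-with-timeout idiom: read a status register a bounded number of times with a delay between reads. A minimal sketch of that idiom as a reusable helper, assuming RREG32()/mdelay() behave as in the surrounding code (the helper is hypothetical; the driver open-codes the loop):

static int evergreen_poll_reg(struct radeon_device *rdev, u32 reg, u32 mask,
                              unsigned tries, unsigned delay_ms)
{
        unsigned i;

        for (i = 0; i < tries; i++) {
                if (RREG32(reg) & mask)
                        return 0;       /* status bit came up */
                mdelay(delay_ms);
        }
        return -ETIMEDOUT;              /* never observed the bit */
}

With it, the wait above would reduce to evergreen_poll_reg(rdev, status_reg, DCLK_STATUS, 100, 10).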
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ int r = 0;
+ u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+ r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0xffff0000;
+ cg_scratch |= vclk / 100; /* MHz */
+
+ r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0x0000ffff;
+ cg_scratch |= (dclk / 100) << 16; /* MHz */
+
+done:
+ WREG32(CG_SCRATCH1, cg_scratch);
+
+ return r;
+}
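
CG_SCRATCH1 is used here as a scratch cache of the last programmed UVD clocks: vclk in MHz in bits 15:0 and dclk in MHz in bits 31:16. A sketch of the matching decode, assuming that packing (the helper is hypothetical):

static void sumo_get_cached_uvd_clocks(struct radeon_device *rdev,
                                       u32 *vclk_mhz, u32 *dclk_mhz)
{
        u32 cg_scratch = RREG32(CG_SCRATCH1);

        *vclk_mhz = cg_scratch & 0xffff;                /* low half: vclk */
        *dclk_mhz = (cg_scratch >> 16) & 0xffff;        /* high half: dclk */
}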
+
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ /* dividers for the UPLL, computed below */
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
+
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+ u16 ctl, v;
+ int err;
+
+ err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ return;
+
+ v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+ /* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+ * to avoid hangs or performance issues
+ */
+ if ((v == 0) || (v == 6) || (v == 7)) {
+ ctl &= ~PCI_EXP_DEVCTL_READRQ;
+ ctl |= (2 << 12);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
+ }
+}
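
The READRQ field occupies bits 14:12 of the PCIe device control word and encodes the maximum read request size as 128 << v bytes, so the value 2 forced above corresponds to 512 bytes, while the 6 and 7 encodings are reserved (they would exceed the 4096-byte architectural maximum). A hypothetical decoding helper, for illustration only:

static u32 evergreen_readrq_to_bytes(u16 devctl)
{
        u16 v = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;

        return 128U << v;       /* v = 0..5 maps to 128..4096 bytes */
}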
+
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ unsigned i = 0;
+
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+}
+
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
+
+/* get temperature in millidegrees */
+int evergreen_get_temp(struct radeon_device *rdev)
+{
+ u32 temp, toffset;
+ int actual_temp = 0;
+
+ if (rdev->family == CHIP_JUNIPER) {
+ toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+ TOFFSET_SHIFT;
+ temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+ TS0_ADC_DOUT_SHIFT;
+
+ if (toffset & 0x100)
+ actual_temp = temp / 2 - (0x200 - toffset);
+ else
+ actual_temp = temp / 2 + toffset;
+
+ actual_temp = actual_temp * 1000;
+
+ } else {
+ temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+
+ if (temp & 0x400)
+ actual_temp = -256;
+ else if (temp & 0x200)
+ actual_temp = 255;
+ else if (temp & 0x100) {
+ actual_temp = temp & 0x1ff;
+ actual_temp |= ~0x1ff;
+ } else
+ actual_temp = temp & 0xff;
+
+ actual_temp = (actual_temp * 1000) / 2;
+ }
+
+ return actual_temp;
+}
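
The non-Juniper path reads ASIC_T in half-degree units using a clamped 9-bit signed encoding: the code treats bit 10 as a low clamp, bit 9 as a high clamp, and bit 8 as the sign of a 9-bit value. A standalone sketch of that decode with the branches annotated (hypothetical helper, same logic as above):

static int evergreen_decode_asic_t(u32 temp)
{
        int actual_temp;

        if (temp & 0x400)               /* clamped below the range */
                actual_temp = -256;
        else if (temp & 0x200)          /* clamped above the range */
                actual_temp = 255;
        else if (temp & 0x100) {        /* negative: sign-extend 9 bits */
                actual_temp = temp & 0x1ff;
                actual_temp |= ~0x1ff;
        } else                          /* positive: plain 8-bit value */
                actual_temp = temp & 0xff;

        return (actual_temp * 1000) / 2;        /* half-degrees C -> mC */
}

For example, a raw reading of 0x50 decodes to 80 half-degrees, i.e. 40000 millidegrees (40 C).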
+
+int sumo_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+ int actual_temp = temp - 49;
+
+ return actual_temp * 1000;
+}
+
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+ /* low,mid sh/mh */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+ /* high sh/mh */
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
+/**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. Difference is that we always use the high clock index for
+ * mclk.
+ */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if (voltage->type == VOLTAGE_SW) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
+ if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+ }
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. Difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ voltage = &rdev->pm.power_state[req_ps_idx].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->vddci == 0xff01)
+ return;
+ if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+ radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ rdev->pm.current_vddci = voltage->vddci;
+ DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+ }
+ }
+}
+
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
+ return connected;
+}
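
The six-way switch repeats the same read-and-test per pin, so the check can also be table-driven. A sketch under the assumption that RADEON_HPD_1..RADEON_HPD_6 are consecutive values starting at 0, as in radeon_mode.h (the array and helper are hypothetical):

static const u32 evergreen_hpd_int_status_reg[] = {
        DC_HPD1_INT_STATUS, DC_HPD2_INT_STATUS, DC_HPD3_INT_STATUS,
        DC_HPD4_INT_STATUS, DC_HPD5_INT_STATUS, DC_HPD6_INT_STATUS,
};

static bool evergreen_hpd_sense_tbl(struct radeon_device *rdev,
                                    enum radeon_hpd_id hpd)
{
        if (hpd >= ARRAY_SIZE(evergreen_hpd_int_status_reg))
                return false;   /* RADEON_HPD_NONE and friends */
        return !!(RREG32(evergreen_hpd_int_status_reg[hpd]) & DC_HPDx_SENSE);
}

The same table shape would serve evergreen_hpd_set_polarity() and the init/fini loops below.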
+
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned enabled = 0;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS; this avoids breaking the
+ * aux dp channel on iMacs and helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * and also avoids interrupt storms during dpms.
+ */
+ continue;
+ }
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ enabled |= 1 << radeon_connector->hpd.hpd;
+ }
+ radeon_irq_kms_enable_hpd(rdev, enabled);
+}
+
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned disabled = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ disabled |= 1 << radeon_connector->hpd.hpd;
+ }
+ radeon_irq_kms_disable_hpd(rdev, disabled);
+}
+
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * the display controllers. The partitioning is done via one of four
+ * preset allocations specified in bits 2:0:
+ * first display controller
+ * 0 - first half of lb (3840 * 2)
+ * 1 - first 3/4 of lb (5760 * 2)
+ * 2 - whole lb (7680 * 2), other crtc must be disabled
+ * 3 - first 1/4 of lb (1920 * 2)
+ * second display controller
+ * 4 - second half of lb (3840 * 2)
+ * 5 - second 3/4 of lb (5760 * 2)
+ * 6 - whole lb (7680 * 2), other crtc must be disabled
+ * 7 - last 1/4 of lb (1920 * 2)
+ */
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode) {
+ tmp = 0; /* 1/2 */
+ buffer_alloc = 1;
+ } else {
+ tmp = 2; /* whole */
+ buffer_alloc = 2;
+ }
+ } else {
+ tmp = 0;
+ buffer_alloc = 0;
+ }
+
+ /* second controller of the pair uses second half of the lb */
+ if (radeon_crtc->crtc_id % 2)
+ tmp += 4;
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+ }
+
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
+ case 1:
+ case 5:
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
+ case 2:
+ case 6:
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
+ case 3:
+ case 7:
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
+ }
+ }
+
+ /* controller not enabled, so no lb used */
+ return 0;
+}
+
+u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ }
+}
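
The NOOFCHAN encoding is a power of two (field values 0..3 map to 1/2/4/8 channels), so the switch reduces to a shift. An equivalent sketch, assuming NOOFCHAN is the 2-bit field the switch above covers:

static u32 evergreen_dram_channels_alt(struct radeon_device *rdev)
{
        u32 tmp = RREG32(MC_SHARED_CHMAP);

        /* 1 << {0,1,2,3} = {1,2,4,8} channels */
        return 1 << ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT);
}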
+
+struct evergreen_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
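
In plain units the fixed20_12 sequence above computes (yclk in kHz / 1000) * (4 bytes per channel) * 0.7 efficiency. A plain-integer cross-check, equal up to fixed-point rounding (illustrative only):

static u32 evergreen_dram_bandwidth_int(struct evergreen_wm_params *wm)
{
        /* (yclk / 1000) * (channels * 4) * 7 / 10, computed in 64 bits */
        return (u32)((u64)wm->yclk * wm->dram_channels * 4 * 7 / 10000);
}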
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, disp_clk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+ u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+ /* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = evergreen_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+ if (evergreen_average_bandwidth(wm) <=
+ (evergreen_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (evergreen_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &radeon_crtc->base.mode;
+ struct evergreen_wm_params wm;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 pipe_offset = radeon_crtc->crtc_id * 16;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (radeon_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+ /* set for low clocks */
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !evergreen_check_latency_hiding(&wm) ||
+ (rdev->disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+ WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+}
+
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ radeon_update_display_priority(rdev);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
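+ /* the display controllers are paired (0/1, 2/3, 4/5); each pair
+ * shares one line buffer, so the split depends on both modes
+ * in the pair */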
+ for (i = 0; i < rdev->num_crtc; i += 2) {
+ mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+ mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+ evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
+}
+
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read SRBM_STATUS and check the MC busy bits */
+ tmp = RREG32(SRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+/*
+ * GART
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* poll the flush response */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ if (rdev->flags & RADEON_IS_IGP) {
+ WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+ } else {
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if ((rdev->family == CHIP_JUNIPER) ||
+ (rdev->family == CHIP_CYPRESS) ||
+ (rdev->family == CHIP_HEMLOCK) ||
+ (rdev->family == CHIP_BARTS))
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ }
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ evergreen_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+ evergreen_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+static void evergreen_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+}
+
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ }
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ radeon_wait_for_vblank(rdev, i);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ }
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* end of the EFI workaround */
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+ /* Block CPU access */
+ WREG32(BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ /* wait for the MC to settle */
+ udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
+}
+
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ }
+
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
+ /* unblackout the MC */
+ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ tmp &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ if (ASIC_IS_DCE6(rdev)) {
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ } else {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ }
+}
+
+void evergreen_mc_program(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lockout access through VGA aperture*/
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ /* llano/ontario only */
+ if ((rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2)) {
+ tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+ tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+ WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+ }
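+ /* MC_VM_FB_LOCATION packs the VRAM range in 16MB units:
+ * top in bits 31:16, base in bits 15:0 */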
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ evergreen_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it from overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+
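+ /* next_rptr is what the CP read pointer will be once the IB packet
+ * below has been fetched: current wptr + this write packet (3 dwords
+ * for the register write, 5 for the memory write) + the 4-dword
+ * INDIRECT_BUFFER packet */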
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r, i;
+ uint32_t cp_me;
+
+ r = radeon_ring_lock(rdev, ring, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ cp_me = 0xff;
+ WREG32(CP_ME_CNTL, cp_me);
+
+ r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < evergreen_default_size; i++)
+ radeon_ring_write(ring, evergreen_default_state[i]);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ return 0;
+}
+
+static int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
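+ /* rb_bufsz is log2 of the ring size in 8-byte (qword) units; the
+ * block size field is likewise log2 of the GPU page size in qwords */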
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR,
+ ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled) {
+ WREG32(SCRATCH_UMSK, 0xff);
+ } else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ ring->rptr = RREG32(CP_RB_RPTR);
+
+ evergreen_cp_start(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ return 0;
+}
+
+/*
+ * Core functions
+ */
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+ u32 gb_addr_config;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl, tmp;
+ u32 disabled_rb_mask;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_PALM:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_SUMO:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ if (rdev->pdev->device == 0x9648)
+ rdev->config.evergreen.max_simds = 3;
+ else if ((rdev->pdev->device == 0x9647) ||
+ (rdev->pdev->device == 0x964a))
+ rdev->config.evergreen.max_simds = 4;
+ else
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_SUMO2:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_BARTS:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 7;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_TURKS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 6;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ evergreen_fix_pci_max_read_req_size(rdev);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ if ((rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2))
+ mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+ else
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.evergreen.tile_config = 0;
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.evergreen.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.evergreen.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.evergreen.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.evergreen.tile_config |= (3 << 0);
+ break;
+ }
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ } else {
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
+ rdev->config.evergreen.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.evergreen.tile_config |= 2 << 4;
+ break;
+ }
+ }
+ rdev->config.evergreen.tile_config |= 0 << 8;
+ rdev->config.evergreen.tile_config |=
+ ((gb_addr_config & 0x30000000) >> 28) << 12;
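+ /* a sketch of how this dword decodes (field names here are
+ * illustrative, not from this file):
+ * num_pipes = 1 << (tile_config & 0xf)
+ * num_banks = 4 << ((tile_config >> 4) & 0xf)
+ * group_size = 256 << ((tile_config >> 8) & 0xf)
+ * row_size = 1024 << ((tile_config >> 12) & 0xf)
+ */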
+
+ num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+
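+ /* the disabled-RB info comes from efuse straps on big Evergreen
+ * parts, and from CC_RB_BACKEND_DISABLE per shader engine elsewhere */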
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ tmp = (((efuse_straps_4 & 0xf) << 4) |
+ ((efuse_straps_3 & 0xf0000000) >> 28));
+ } else {
+ tmp = 0;
+ for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
+ }
+ }
+ /* the enabled RBs are simply the ones not disabled :) */
+ disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
+ WREG32(GB_BACKEND_MAP, tmp);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ if (rdev->family <= CHIP_SUMO2)
+ WREG32(SMX_SAR_CTL0, 0x00010000);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ case CHIP_CAICOS:
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ ps_thread_count = 96;
+ break;
+ default:
+ ps_thread_count = 128;
+ break;
+ }
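+ /* each remaining thread type gets a sixth of the non-PS threads,
+ * rounded down to a multiple of 8 */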
+
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ case CHIP_CAICOS:
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ break;
+ default:
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ break;
+ }
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+ WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ /* clear render buffer base addresses */
+ WREG32(CB_COLOR0_BASE, 0);
+ WREG32(CB_COLOR1_BASE, 0);
+ WREG32(CB_COLOR2_BASE, 0);
+ WREG32(CB_COLOR3_BASE, 0);
+ WREG32(CB_COLOR4_BASE, 0);
+ WREG32(CB_COLOR5_BASE, 0);
+ WREG32(CB_COLOR6_BASE, 0);
+ WREG32(CB_COLOR7_BASE, 0);
+ WREG32(CB_COLOR8_BASE, 0);
+ WREG32(CB_COLOR9_BASE, 0);
+ WREG32(CB_COLOR10_BASE, 0);
+ WREG32(CB_COLOR11_BASE, 0);
+
+ /* set the shader const cache sizes to 0 */
+ for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+ WREG32(i, 0);
+ for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+ WREG32(i, 0);
+
+ tmp = RREG32(HDP_MISC_CNTL);
+ tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+ WREG32(HDP_MISC_CNTL, tmp);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ if ((rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2))
+ tmp = RREG32(FUS_MC_ARB_RAMCFG);
+ else
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
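+ /* effective bus width in bits: channels * bits per channel */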
+ rdev->mc.vram_width = numchan * chansize;
+ /* could the aperture size report 0? */
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ /* Setup GPU memory space */
+ if ((rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2)) {
+ /* size in bytes on fusion */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ } else {
+ /* size in MB on evergreen/cayman/tn */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ }
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r700_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+
+ return 0;
+}
+
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
+{
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
+ RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
+ RREG32(SRBM_STATUS2));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+ if (rdev->family >= CHIP_CAYMAN) {
+ dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG + 0x800));
+ }
+}
+
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
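+ /* sample the position counters of all enabled crtcs; if one fails to
+ * advance over ~1ms (10 x 100us), consider the display hung */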
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+ crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ SPI_BUSY | VGT_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
+ return reset_mask;
+}
+
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ udelay(50);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset |= SOFT_RESET_DB |
+ SOFT_RESET_CB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_SH |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP |
+ SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
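+ /* assert the resets, read back to post the write, let them latch,
+ * then deassert */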
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ evergreen_mc_resume(rdev, &save);
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ evergreen_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ if (crtc >= rdev->num_crtc)
+ return 0;
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0,
+ CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cayman_cp_int_cntl_setup(rdev, 1, 0);
+ cayman_cp_int_cntl_setup(rdev, 2, 0);
+ tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ WREG32(CAYMAN_DMA1_CNTL, tmp);
+ } else {
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ }
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ if (rdev->num_crtc >= 4) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
+
+ /* only one DAC on DCE6 */
+ if (!ASIC_IS_DCE6(rdev))
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+ u32 dma_cntl, dma_cntl1 = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* enable CP interrupts on all rings */
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+ cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+ cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+ }
+ } else {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ }
+
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2] ||
+ atomic_read(&rdev->irq.pflip[2])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3] ||
+ atomic_read(&rdev->irq.pflip[3])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4] ||
+ atomic_read(&rdev->irq.pflip[4])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5] ||
+ atomic_read(&rdev->irq.pflip[5])) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+ afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+ afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[2]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+ afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[3]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+ afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[4]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+ afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[5]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+ afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+ cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+ cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+ } else
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_CAYMAN)
+ WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ if (rdev->num_crtc >= 4) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ }
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
+ return 0;
+}
+
+static void evergreen_irq_ack(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
+
+ rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+ }
+}
+
+static void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ if (rdev->wb.enabled)
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+ else
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing the
+ * interrupts from the last vector that was not overwritten
+ * (wptr + 16). Hopefully this will allow us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
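+ /* e.g. with the 64 KB IH ring set up in evergreen_init(),
+ * ptr_mask is 0xffff, so this lands rptr just past the last
+ * vector that survived the overflow */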
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr;
+ u32 rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ bool queue_hotplug = false;
+ bool queue_hdmi = false;
+
+ if (!rdev->ih.enabled || rdev->shutdown)
+ return IRQ_NONE;
+
+ wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
+ /* display interrupts */
+ evergreen_irq_ack(rdev);
+
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+ src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
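+ /* each IH vector is 16 bytes (four dwords): dword 0 carries the
+ * source id in bits 7:0 and dword 1 the source data in bits 27:0,
+ * which is what the masks above extract; rptr advances by 16 per
+ * vector at the bottom of the loop */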
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 44: /* hdmi */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ if (rdev->family >= CHIP_CAYMAN) {
+ switch (src_data) {
+ case 0:
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 2:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ }
+ } else
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
+ case 244: /* DMA1 trap event */
+ if (rdev->family >= CHIP_CAYMAN) {
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ if (queue_hotplug)
+ schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, plus a DMA trap packet to generate an
+ * interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
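+ /* the DMA engine takes 40-bit addresses: a dword-aligned low part
+ * plus 8 high bits, hence the 0xfffffffc and 0xff masks below */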
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
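+ /* next_rptr now points just past the upcoming indirect buffer
+ * packet: 4 dwords for this write packet, NOP padding up to
+ * (wptr & 7) == 5, then the 3-dword IB packet emitted below */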
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
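+ /* the IB packet is 3 dwords, so padding until (wptr & 7) == 5
+ * makes it end exactly on an 8 DW boundary */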
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
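+ /* a single DMA_PACKET_COPY moves at most 0xfffff dwords, so the
+ * request is split into chunks of that size; each chunk costs the
+ * 5 ring dwords written in the loop below */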
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
+ evergreen_mc_program(rdev);
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+ r = evergreen_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ evergreen_gpu_init(rdev);
+
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ if (r)
+ return r;
+
+ r = evergreen_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = evergreen_cp_resume(rdev);
+ if (r)
+ return r;
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* reset the ASIC; the GFX blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed!\n");
+ /* Do not reset the GPU before posting; on rv770 hardware,
+ * unlike r500, posting performs the tasks necessary to bring
+ * the GPU back into good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
+
+ rdev->accel_working = true;
+ r = evergreen_startup(rdev);
+ if (r) {
+ DRM_ERROR("evergreen startup failed on resume\n");
+ rdev->accel_working = false;
+ return r;
+ }
+
+ return r;
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_suspend(rdev);
+ r700_cp_stop(rdev);
+ r600_dma_stop(rdev);
+ evergreen_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ evergreen_pcie_gart_disable(rdev);
+
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call ASIC-specific functions. This should also allow us
+ * to remove a bunch of callbacks like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* reset the ASIC; the GFX blocks are often in a bad state
+ * after the driver is unloaded or after a resume
+ */
+ if (radeon_asic_reset(rdev))
+ dev_warn(rdev->dev, "GPU reset failed!\n");
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = evergreen_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r600_blit_fini(rdev);
+ r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, speed_cntl;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if (speed_cntl & LC_CURRENT_DATA_RATE) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644
index 0000000..057c87b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -0,0 +1,729 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+#include "cayman_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 cb_color_info;
+ int pitch, slice;
+
+ h = ALIGN(h, 8);
+ if (h < 8)
+ h = 8;
+
+ cb_color_info = CB_FORMAT(format) |
+ CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+ CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
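+ /* the CB registers take pitch in units of 8 pixels and slice in
+ * units of 64 pixels, both encoded minus one */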
+ pitch = (w / 8) - 1;
+ slice = ((w * h) / 64) - 1;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, pitch);
+ radeon_ring_write(ring, slice);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, cb_color_info);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 cp_coher_size;
+
+ if (size == 0xffffffff)
+ cp_coher_size = 0xffffffff;
+ else
+ cp_coher_size = ((size + 255) >> 8);
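+ /* CP_COHER_SIZE is expressed in 256-byte units, hence the
+ * round-up shift above; 0xffffffff requests a full-range sync */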
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+ * to the RB directly. For IBs, the CP programs this as part of the
+ * surface_sync packet.
+ */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u64 gpu_addr;
+
+ /* VS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 2);
+ radeon_ring_write(ring, 0);
+
+ /* PS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 2);
+
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 10 + 1 sync (5) = 15 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+ /* high addr, stride */
+ sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+ SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+ /* xyzw swizzles */
+ sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
+ SQ_VTCX_SEL_Y(SQ_SEL_Y) |
+ SQ_VTCX_SEL_Z(SQ_SEL_Z) |
+ SQ_VTCX_SEL_W(SQ_SEL_W);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0x580);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1); /* size */
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, sq_vtx_constant_word3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2) ||
+ (rdev->family == CHIP_CAICOS))
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+ else
+ cp_set_surface_sync(rdev,
+ PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr, u32 size)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 sq_tex_resource_word0, sq_tex_resource_word1;
+ u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+ if (h < 1)
+ h = 1;
+
+ sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+ ((w - 1) << 18));
+ sq_tex_resource_word1 = ((h - 1) << 0) |
+ TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ /* xyzw swizzles */
+ sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
+ TEX_DST_SEL_Y(SQ_SEL_Y) |
+ TEX_DST_SEL_Z(SQ_SEL_Z) |
+ TEX_DST_SEL_W(SQ_SEL_W);
+
+ sq_tex_resource_word7 = format |
+ S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
+
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word7);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ /* work around some hw bugs */
+ if (x2 == 0)
+ x1 = 1;
+ if (y2 == 0)
+ y1 = 1;
+ if (rdev->family >= CHIP_CAYMAN) {
+ if ((x2 == 1) && (y2 == 1))
+ x2 = 2;
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 2) |
+#endif
+ DI_INDEX_SIZE_16_BIT);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
+
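+ /* a RECTLIST primitive is drawn from 3 auto-generated indices,
+ * hence the index count of 3 below */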
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+}
+
+/* emits 39 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+ int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+ int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+ int num_hs_threads, num_ls_threads;
+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+ int num_hs_stack_entries, num_ls_stack_entries;
+ u64 gpu_addr;
+ int dwords;
+
+ /* set clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
+
+ if (rdev->family < CHIP_CAYMAN) {
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ default:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_REDWOOD:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_JUNIPER:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_PALM:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_SUMO:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 25;
+ num_gs_threads = 25;
+ num_es_threads = 25;
+ num_hs_threads = 25;
+ num_ls_threads = 25;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_SUMO2:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 25;
+ num_gs_threads = 25;
+ num_es_threads = 25;
+ num_hs_threads = 25;
+ num_ls_threads = 25;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_BARTS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_TURKS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_CAICOS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 10;
+ num_gs_threads = 10;
+ num_es_threads = 10;
+ num_hs_threads = 10;
+ num_ls_threads = 10;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ }
+
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_SUMO) ||
+ (rdev->family == CHIP_SUMO2) ||
+ (rdev->family == CHIP_CAICOS))
+ sq_config = 0;
+ else
+ sq_config = VC_ENABLE;
+
+ sq_config |= (EXPORT_SRC_C |
+ CS_PRIO(0) |
+ LS_PRIO(0) |
+ HS_PRIO(0) |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+ NUM_VS_GPRS(num_vs_gprs) |
+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+ NUM_ES_GPRS(num_es_gprs));
+ sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+ NUM_LS_GPRS(num_ls_gprs));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+ NUM_VS_THREADS(num_vs_threads) |
+ NUM_GS_THREADS(num_gs_threads) |
+ NUM_ES_THREADS(num_es_threads));
+ sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+ NUM_LS_THREADS(num_ls_threads));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+ sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+ NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+ /* disable dyn gprs */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+
+ /* setup LDS */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0x10001000);
+
+ /* SQ config */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_3);
+ }
+
+ /* CONTEXT_CONTROL */
+ radeon_ring_write(ring, 0xc0012800);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+
+ /* SET_SAMPLER */
+ radeon_ring_write(ring, 0xc0036e00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000012);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+
+ /* emit an IB pointing at default state */
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
+}
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+ u32 obj_size;
+ int i, r, dwords;
+ void *ptr;
+ u32 packet2s[16];
+ int num_packet2s = 0;
+
+ rdev->r600_blit.primitives.set_render_target = set_render_target;
+ rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+ rdev->r600_blit.primitives.set_shaders = set_shaders;
+ rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+ rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+ rdev->r600_blit.primitives.set_scissors = set_scissors;
+ rdev->r600_blit.primitives.draw_auto = draw_auto;
+ rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
+ rdev->r600_blit.ring_size_common += 5; /* done copy */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+ rdev->r600_blit.ring_size_per_loop = 74;
+ if (rdev->family >= CHIP_CAYMAN)
+ rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
+
+ rdev->r600_blit.max_dim = 16384;
+
+ rdev->r600_blit.state_offset = 0;
+
+ if (rdev->family < CHIP_CAYMAN)
+ rdev->r600_blit.state_len = evergreen_default_size;
+ else
+ rdev->r600_blit.state_len = cayman_default_size;
+
+ dwords = rdev->r600_blit.state_len;
+ while (dwords & 0xf) {
+ packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+ dwords++;
+ }
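+ /*
+ * Illustrative padding math (the state_len value here is hypothetical):
+ * if state_len were 0x165 dwords, the loop above would append eleven
+ * PACKET2 NOPs so that dwords reaches 0x170, a multiple of 16, matching
+ * the ALIGN(state_len, 0x10) used when the state IB is emitted.
+ */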
+
+ obj_size = dwords * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.vs_offset = obj_size;
+ if (rdev->family < CHIP_CAYMAN)
+ obj_size += evergreen_vs_size * 4;
+ else
+ obj_size += cayman_vs_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.ps_offset = obj_size;
+ if (rdev->family < CHIP_CAYMAN)
+ obj_size += evergreen_ps_size * 4;
+ else
+ obj_size += cayman_ps_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ /* pin copy shader into vram if not already initialized */
+ if (!rdev->r600_blit.shader_obj) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("evergreen failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+ }
+
+ DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+ obj_size,
+ rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+ if (r) {
+ DRM_ERROR("failed to map blit object %d\n", r);
+ return r;
+ }
+
+ if (rdev->family < CHIP_CAYMAN) {
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < evergreen_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+ for (i = 0; i < evergreen_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+ } else {
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ cayman_default_state, rdev->r600_blit.state_len * 4);
+
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < cayman_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
+ for (i = 0; i < cayman_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
+ }
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ return 0;
+}
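+
+/*
+ * Layout of the blit shader bo constructed above (cayman differs only
+ * in the shader sizes):
+ *
+ *   offset 0:     default state, padded with PACKET2 NOPs to 16 dwords
+ *   ALIGN(, 256): vs_offset - vertex shader
+ *   ALIGN(, 256): ps_offset - pixel shader
+ *
+ * The 256-byte alignment matches the hardware's addr >> 8 register
+ * encoding, so each section can be pointed at directly.
+ */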
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644
index 0000000..f85c0af
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand-generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 evergreen_default_state[] =
+{
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x00000010,
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0106900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc00d6900,
+ 0x00000202,
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x0000022a,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc00b6900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
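+
+/*
+ * Each entry above is a raw PACKET3_SET_CONTEXT_REG packet.  Decoding
+ * the first one as an example:
+ *   0xc0016900: type 3 (bits 31:30), count = 1 (bits 29:16), opcode
+ *               0x69 (SET_CONTEXT_REG), i.e. count + 1 = 2 payload dwords
+ *   0x0000023b: register index: 0x28000 + 0x23b * 4 = 0x288ec
+ *               (SQ_LDS_ALLOC_PS)
+ *   0x00000000: value written to that register
+ */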
+
+const u32 evergreen_vs[] =
+{
+ 0x00000004,
+ 0x80800400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15200688,
+ 0x00000000,
+ 0x00000000,
+ 0x3c000000,
+ 0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x000a0000,
+#else
+ 0x00080000,
+#endif
+ 0x00000000,
+ 0x1c000000,
+ 0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
+ 0x00000008,
+#endif
+ 0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+ 0x00000003,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95200688,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644
index 0000000..bb8d6c7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 0000000..eb8ac31
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,3514 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
+
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#define MIN(a,b) (((a)<(b))?(a):(b))
+
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+struct evergreen_cs_track {
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ u32 row_size;
+ /* value we track */
+ u32 nsamples; /* unused */
+ struct radeon_bo *cb_color_bo[12];
+ u32 cb_color_bo_offset[12];
+ struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
+ struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
+ u32 cb_color_info[12];
+ u32 cb_color_view[12];
+ u32 cb_color_pitch[12];
+ u32 cb_color_slice[12];
+ u32 cb_color_slice_idx[12];
+ u32 cb_color_attrib[12];
+ u32 cb_color_cmask_slice[8];/* unused */
+ u32 cb_color_fmask_slice[8];/* unused */
+ u32 cb_target_mask;
+ u32 cb_shader_mask; /* unused */
+ u32 vgt_strmout_config;
+ u32 vgt_strmout_buffer_config;
+ struct radeon_bo *vgt_strmout_bo[4];
+ u32 vgt_strmout_bo_offset[4];
+ u32 vgt_strmout_size[4];
+ u32 db_depth_control;
+ u32 db_depth_view;
+ u32 db_depth_slice;
+ u32 db_depth_size;
+ u32 db_z_info;
+ u32 db_z_read_offset;
+ u32 db_z_write_offset;
+ struct radeon_bo *db_z_read_bo;
+ struct radeon_bo *db_z_write_bo;
+ u32 db_s_info;
+ u32 db_s_read_offset;
+ u32 db_s_write_offset;
+ struct radeon_bo *db_s_read_bo;
+ struct radeon_bo *db_s_write_bo;
+ bool sx_misc_kill_all_prims;
+ bool cb_dirty;
+ bool db_dirty;
+ bool streamout_dirty;
+ u32 htile_offset;
+ u32 htile_surface;
+ struct radeon_bo *htile_bo;
+};
+
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_fmask_bo[i] = NULL;
+ track->cb_color_cmask_bo[i] = NULL;
+ track->cb_color_cmask_slice[i] = 0;
+ track->cb_color_fmask_slice[i] = 0;
+ }
+
+ for (i = 0; i < 12; i++) {
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0xFFFFFFFF;
+ track->cb_color_pitch[i] = 0;
+ track->cb_color_slice[i] = 0xfffffff;
+ track->cb_color_slice_idx[i] = 0;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+ track->cb_dirty = true;
+
+ track->db_depth_slice = 0xffffffff;
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_control = 0xFFFFFFFF;
+ track->db_z_info = 0xFFFFFFFF;
+ track->db_z_read_offset = 0xFFFFFFFF;
+ track->db_z_write_offset = 0xFFFFFFFF;
+ track->db_z_read_bo = NULL;
+ track->db_z_write_bo = NULL;
+ track->db_s_info = 0xFFFFFFFF;
+ track->db_s_read_offset = 0xFFFFFFFF;
+ track->db_s_write_offset = 0xFFFFFFFF;
+ track->db_s_read_bo = NULL;
+ track->db_s_write_bo = NULL;
+ track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
+
+ for (i = 0; i < 4; i++) {
+ track->vgt_strmout_size[i] = 0;
+ track->vgt_strmout_bo[i] = NULL;
+ track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+ }
+ track->streamout_dirty = true;
+ track->sx_misc_kill_all_prims = false;
+}
+
+struct eg_surface {
+ /* value gathered from cs */
+ unsigned nbx;
+ unsigned nby;
+ unsigned format;
+ unsigned mode;
+ unsigned nbanks;
+ unsigned bankw;
+ unsigned bankh;
+ unsigned tsplit;
+ unsigned mtilea;
+ unsigned nsamples;
+ /* output value */
+ unsigned bpe;
+ unsigned layer_size;
+ unsigned palign;
+ unsigned halign;
+ unsigned long base_align;
+};
+
+static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+ surf->base_align = surf->bpe;
+ surf->palign = 1;
+ surf->halign = 1;
+ return 0;
+}
+
+static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned palign;
+
+ palign = MAX(64, track->group_size / surf->bpe);
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+ surf->base_align = track->group_size;
+ surf->palign = palign;
+ surf->halign = 1;
+ if (surf->nbx & (palign - 1)) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nbx, palign);
+ }
+ return -EINVAL;
+ }
+ return 0;
+}
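+
+/*
+ * Example with illustrative numbers: for group_size = 512 and a 32bpp
+ * surface (bpe = 4), palign = MAX(64, 512 / 4) = 128, so the surface
+ * pitch must be a multiple of 128 pixels and the base address a
+ * multiple of the 512-byte group size.
+ */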
+
+static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned palign;
+
+ palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+ palign = MAX(8, palign);
+ surf->layer_size = surf->nbx * surf->nby * surf->bpe;
+ surf->base_align = track->group_size;
+ surf->palign = palign;
+ surf->halign = 8;
+ if ((surf->nbx & (palign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
+ __func__, __LINE__, prefix, surf->nbx, palign,
+ track->group_size, surf->bpe, surf->nsamples);
+ }
+ return -EINVAL;
+ }
+ if ((surf->nby & (8 - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
+ __func__, __LINE__, prefix, surf->nby);
+ }
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned palign, halign, tileb, slice_pt;
+ unsigned mtile_pr, mtile_ps, mtileb;
+
+ tileb = 64 * surf->bpe * surf->nsamples;
+ slice_pt = 1;
+ if (tileb > surf->tsplit) {
+ slice_pt = tileb / surf->tsplit;
+ }
+ tileb = tileb / slice_pt;
+ /* macro tile width & height */
+ palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
+ halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
+ mtileb = (palign / 8) * (halign / 8) * tileb;
+ mtile_pr = surf->nbx / palign;
+ mtile_ps = (mtile_pr * surf->nby) / halign;
+ surf->layer_size = mtile_ps * mtileb * slice_pt;
+ surf->base_align = (palign / 8) * (halign / 8) * tileb;
+ surf->palign = palign;
+ surf->halign = halign;
+
+ if ((surf->nbx & (palign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nbx, palign);
+ }
+ return -EINVAL;
+ }
+ if ((surf->nby & (halign - 1))) {
+ if (prefix) {
+ dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
+ __func__, __LINE__, prefix, surf->nby, halign);
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
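+
+/*
+ * Worked example of the macro-tile math above, with illustrative values
+ * bpe = 4, nsamples = 1, bankw = bankh = 1, npipes = 8, nbanks = 8,
+ * mtilea = 2, tsplit = 1024:
+ *   tileb  = 64 * 4 * 1 = 256 bytes (<= tsplit, so slice_pt = 1)
+ *   palign = (8 * 1 * 8) * 2 = 128 pixels
+ *   halign = (8 * 1 * 8) / 2 = 32 lines
+ *   mtileb = (128 / 8) * (32 / 8) * 256 = 16384 bytes per macro tile
+ */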
+
+static int evergreen_surface_check(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ /* some common value computed here */
+ surf->bpe = r600_fmt_get_blocksize(surf->format);
+
+ switch (surf->mode) {
+ case ARRAY_LINEAR_GENERAL:
+ return evergreen_surface_check_linear(p, surf, prefix);
+ case ARRAY_LINEAR_ALIGNED:
+ return evergreen_surface_check_linear_aligned(p, surf, prefix);
+ case ARRAY_1D_TILED_THIN1:
+ return evergreen_surface_check_1d(p, surf, prefix);
+ case ARRAY_2D_TILED_THIN1:
+ return evergreen_surface_check_2d(p, surf, prefix);
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+ __func__, __LINE__, prefix, surf->mode);
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
+ struct eg_surface *surf,
+ const char *prefix)
+{
+ switch (surf->mode) {
+ case ARRAY_2D_TILED_THIN1:
+ break;
+ case ARRAY_LINEAR_GENERAL:
+ case ARRAY_LINEAR_ALIGNED:
+ case ARRAY_1D_TILED_THIN1:
+ return 0;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+ __func__, __LINE__, prefix, surf->mode);
+ return -EINVAL;
+ }
+
+ switch (surf->nbanks) {
+ case 0: surf->nbanks = 2; break;
+ case 1: surf->nbanks = 4; break;
+ case 2: surf->nbanks = 8; break;
+ case 3: surf->nbanks = 16; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
+ __func__, __LINE__, prefix, surf->nbanks);
+ return -EINVAL;
+ }
+ switch (surf->bankw) {
+ case 0: surf->bankw = 1; break;
+ case 1: surf->bankw = 2; break;
+ case 2: surf->bankw = 4; break;
+ case 3: surf->bankw = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
+ __func__, __LINE__, prefix, surf->bankw);
+ return -EINVAL;
+ }
+ switch (surf->bankh) {
+ case 0: surf->bankh = 1; break;
+ case 1: surf->bankh = 2; break;
+ case 2: surf->bankh = 4; break;
+ case 3: surf->bankh = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
+ __func__, __LINE__, prefix, surf->bankh);
+ return -EINVAL;
+ }
+ switch (surf->mtilea) {
+ case 0: surf->mtilea = 1; break;
+ case 1: surf->mtilea = 2; break;
+ case 2: surf->mtilea = 4; break;
+ case 3: surf->mtilea = 8; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
+ __func__, __LINE__, prefix, surf->mtilea);
+ return -EINVAL;
+ }
+ switch (surf->tsplit) {
+ case 0: surf->tsplit = 64; break;
+ case 1: surf->tsplit = 128; break;
+ case 2: surf->tsplit = 256; break;
+ case 3: surf->tsplit = 512; break;
+ case 4: surf->tsplit = 1024; break;
+ case 5: surf->tsplit = 2048; break;
+ case 6: surf->tsplit = 4096; break;
+ default:
+ dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
+ __func__, __LINE__, prefix, surf->tsplit);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+ pitch = track->cb_color_pitch[id];
+ slice = track->cb_color_slice[id];
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
+ surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
+ surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
+ surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
+ surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
+ surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
+ surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
+ surf.nsamples = 1;
+
+ if (!r600_fmt_is_valid_color(surf.format)) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
+ __func__, __LINE__, surf.format,
+ id, track->cb_color_info[id]);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "cb");
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, "cb");
+ if (r) {
+ dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, id, track->cb_color_pitch[id],
+ track->cb_color_slice[id], track->cb_color_attrib[id],
+ track->cb_color_info[id]);
+ return r;
+ }
+
+ offset = track->cb_color_bo_offset[id] << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, id, offset, surf.base_align);
+ return -EINVAL;
+ }
+
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ /* old ddx versions are broken: they allocate the bo with w*h*bpp
+ * but program the slice with ALIGN(h, 8); catch this and patch
+ * the command stream.
+ */
+ if (!surf.mode) {
+ volatile u32 *ib = p->ib.ptr;
+ unsigned long tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+ tmp = track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+ break;
+ }
+ }
+ if (nby > min) {
+ surf.nby = nby;
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+ tmp += surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+ }
+ }
+ }
+ }
+ dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+ "offset %d, max layer %d, bo size %ld, slice %d)\n",
+ __func__, __LINE__, id, surf.layer_size,
+ track->cb_color_bo_offset[id] << 8, mslice,
+ radeon_bo_size(track->cb_color_bo[id]), slice);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+ surf.mode, surf.bpe, surf.nsamples,
+ surf.bankw, surf.bankh,
+ surf.tsplit, surf.mtilea);
+ return -EINVAL;
+ }
+old_ddx_ok:
+
+ return 0;
+}
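+
+/*
+ * Example of the pitch/slice encoding validated above, for an
+ * illustrative 1024x768 32bpp render target: the pitch field holds
+ * 1024 / 8 - 1 = 127, giving nbx = (127 + 1) * 8 = 1024, and the slice
+ * field holds 1024 * 768 / 64 - 1 = 12287, giving
+ * nby = (12287 + 1) * 64 / 1024 = 768.
+ */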
+
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+ unsigned nbx, unsigned nby)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned long size;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_z_info);
+ return -EINVAL;
+ }
+
+ if (G_028ABC_LINEAR(track->htile_surface)) {
+ /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* height is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+ /* always assume 8x8 htile */
+ /* alignment is htile alignment * 8; htile alignment varies with
+ * the number of pipes, the tile width and nby
+ */
+ switch (track->npipes) {
+ case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+ /* compute number of htile */
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+ size += track->htile_offset;
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ return 0;
+}
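+
+/*
+ * Worked example for the linear htile path above, with illustrative
+ * numbers npipes = 2 and a 1024x768 depth buffer: nbx rounds up to a
+ * multiple of 128 (stays 1024), nby to a multiple of 16 (stays 768);
+ * (1024 / 8) * (768 / 8) htiles at 4 bytes each give 49152 bytes,
+ * already a multiple of npipes * 2K, so that is the minimum htile bo
+ * size at offset 0.
+ */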
+
+static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+ pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+ slice = track->db_depth_slice;
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+ surf.format = G_028044_FORMAT(track->db_s_info);
+ surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
+ surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+ surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+ surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+ surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+ surf.nsamples = 1;
+
+ if (surf.format != 1) {
+ dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+ /* replace by color format so we can use same code */
+ surf.format = V_028C70_COLOR_8;
+
+ r = evergreen_surface_value_conv_check(p, &surf, "stencil");
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, NULL);
+ if (r) {
+ /* old userspace doesn't compute proper depth/stencil alignment;
+ * re-check that alignment against a larger bytes-per-element value
+ * and only report an error if that alignment is wrong too.
+ */
+ surf.format = V_028C70_COLOR_8_8_8_8;
+ r = evergreen_surface_check(p, &surf, "stencil");
+ if (r) {
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_s_info, track->db_z_info);
+ }
+ return r;
+ }
+
+ offset = track->db_s_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_read_bo)) {
+ dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_s_read_offset << 8, mslice,
+ radeon_bo_size(track->db_s_read_bo));
+ dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_s_info, track->db_z_info);
+ return -EINVAL;
+ }
+
+ offset = track->db_s_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_s_write_bo)) {
+ dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_s_write_offset << 8, mslice,
+ radeon_bo_size(track->db_s_write_bo));
+ return -EINVAL;
+ }
+
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ struct eg_surface surf;
+ unsigned pitch, slice, mslice;
+ unsigned long offset;
+ int r;
+
+ mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+ pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+ slice = track->db_depth_slice;
+ surf.nbx = (pitch + 1) * 8;
+ surf.nby = ((slice + 1) * 64) / surf.nbx;
+ surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+ surf.format = G_028040_FORMAT(track->db_z_info);
+ surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
+ surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+ surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+ surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+ surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+ surf.nsamples = 1;
+
+ switch (surf.format) {
+ case V_028040_Z_16:
+ surf.format = V_028C70_COLOR_16;
+ break;
+ case V_028040_Z_24:
+ case V_028040_Z_32_FLOAT:
+ surf.format = V_028C70_COLOR_8_8_8_8;
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d depth invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "depth");
+ if (r) {
+ dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_z_info);
+ return r;
+ }
+
+ r = evergreen_surface_check(p, &surf, "depth");
+ if (r) {
+ dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+ __func__, __LINE__, track->db_depth_size,
+ track->db_depth_slice, track->db_z_info);
+ return r;
+ }
+
+ offset = track->db_z_read_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_read_bo)) {
+ dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_z_read_offset << 8, mslice,
+ radeon_bo_size(track->db_z_read_bo));
+ return -EINVAL;
+ }
+
+ offset = track->db_z_write_offset << 8;
+ if (offset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, offset, surf.base_align);
+ return -EINVAL;
+ }
+ offset += surf.layer_size * mslice;
+ if (offset > radeon_bo_size(track->db_z_write_bo)) {
+ dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+ "offset %ld, max layer %d, bo size %ld)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)track->db_z_write_offset << 8, mslice,
+ radeon_bo_size(track->db_z_write_bo));
+ return -EINVAL;
+ }
+
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap,
+ unsigned idx)
+{
+ struct eg_surface surf;
+ unsigned long toffset, moffset;
+ unsigned dim, llevel, mslice, width, height, depth, i;
+ u32 texdw[8];
+ int r;
+
+ texdw[0] = radeon_get_ib_value(p, idx + 0);
+ texdw[1] = radeon_get_ib_value(p, idx + 1);
+ texdw[2] = radeon_get_ib_value(p, idx + 2);
+ texdw[3] = radeon_get_ib_value(p, idx + 3);
+ texdw[4] = radeon_get_ib_value(p, idx + 4);
+ texdw[5] = radeon_get_ib_value(p, idx + 5);
+ texdw[6] = radeon_get_ib_value(p, idx + 6);
+ texdw[7] = radeon_get_ib_value(p, idx + 7);
+ dim = G_030000_DIM(texdw[0]);
+ llevel = G_030014_LAST_LEVEL(texdw[5]);
+ mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
+ width = G_030000_TEX_WIDTH(texdw[0]) + 1;
+ height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
+ depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
+ surf.format = G_03001C_DATA_FORMAT(texdw[7]);
+ surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
+ surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
+ surf.nby = r600_fmt_get_nblocksy(surf.format, height);
+ surf.mode = G_030004_ARRAY_MODE(texdw[1]);
+ surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
+ surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
+ surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
+ surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
+ surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
+ surf.nsamples = 1;
+ toffset = texdw[2] << 8;
+ moffset = texdw[3] << 8;
+
+ if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, surf.format);
+ return -EINVAL;
+ }
+ switch (dim) {
+ case V_030000_SQ_TEX_DIM_1D:
+ case V_030000_SQ_TEX_DIM_2D:
+ case V_030000_SQ_TEX_DIM_CUBEMAP:
+ case V_030000_SQ_TEX_DIM_1D_ARRAY:
+ case V_030000_SQ_TEX_DIM_2D_ARRAY:
+ depth = 1;
+ break;
+ case V_030000_SQ_TEX_DIM_2D_MSAA:
+ case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ surf.nsamples = 1 << llevel;
+ llevel = 0;
+ depth = 1;
+ break;
+ case V_030000_SQ_TEX_DIM_3D:
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
+ __func__, __LINE__, dim);
+ return -EINVAL;
+ }
+
+ r = evergreen_surface_value_conv_check(p, &surf, "texture");
+ if (r) {
+ return r;
+ }
+
+ /* align height */
+ evergreen_surface_check(p, &surf, NULL);
+ surf.nby = ALIGN(surf.nby, surf.halign);
+
+ r = evergreen_surface_check(p, &surf, "texture");
+ if (r) {
+ dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ __func__, __LINE__, texdw[0], texdw[1], texdw[4],
+ texdw[5], texdw[6], texdw[7]);
+ return r;
+ }
+
+ /* check texture size */
+ if (toffset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, toffset, surf.base_align);
+ return -EINVAL;
+ }
+ if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
+ dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
+ __func__, __LINE__, moffset, surf.base_align);
+ return -EINVAL;
+ }
+ if (dim == SQ_TEX_DIM_3D) {
+ toffset += surf.layer_size * depth;
+ } else {
+ toffset += surf.layer_size * mslice;
+ }
+ if (toffset > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
+ "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
+ __func__, __LINE__, surf.layer_size,
+ (unsigned long)texdw[2] << 8, mslice,
+ depth, radeon_bo_size(texture),
+ surf.nbx, surf.nby);
+ return -EINVAL;
+ }
+
+ if (!mipmap) {
+ if (llevel) {
+ dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ } else {
+ return 0; /* everything's ok */
+ }
+ }
+
+ /* check mipmap size */
+ for (i = 1; i <= llevel; i++) {
+ unsigned w, h, d;
+
+ w = r600_mip_minify(width, i);
+ h = r600_mip_minify(height, i);
+ d = r600_mip_minify(depth, i);
+ surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
+ surf.nby = r600_fmt_get_nblocksy(surf.format, h);
+
+ switch (surf.mode) {
+ case ARRAY_2D_TILED_THIN1:
+ if (surf.nbx < surf.palign || surf.nby < surf.halign) {
+ surf.mode = ARRAY_1D_TILED_THIN1;
+ }
+ /* recompute alignment */
+ evergreen_surface_check(p, &surf, NULL);
+ break;
+ case ARRAY_LINEAR_GENERAL:
+ case ARRAY_LINEAR_ALIGNED:
+ case ARRAY_1D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid array mode %d\n",
+ __func__, __LINE__, surf.mode);
+ return -EINVAL;
+ }
+ surf.nbx = ALIGN(surf.nbx, surf.palign);
+ surf.nby = ALIGN(surf.nby, surf.halign);
+
+ r = evergreen_surface_check(p, &surf, "mipmap");
+ if (r) {
+ return r;
+ }
+
+ if (dim == SQ_TEX_DIM_3D) {
+ moffset += surf.layer_size * d;
+ } else {
+ moffset += surf.layer_size * mslice;
+ }
+ if (moffset > radeon_bo_size(mipmap)) {
+ dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
+ "offset %ld, coffset %ld, max layer %d, depth %d, "
+ "bo size %ld) level0 (%d %d %d)\n",
+ __func__, __LINE__, i, surf.layer_size,
+ (unsigned long)texdw[3] << 8, moffset, mslice,
+ d, radeon_bo_size(mipmap),
+ width, height, depth);
+ dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+ __func__, __LINE__, surf.nbx, surf.nby,
+ surf.mode, surf.bpe, surf.nsamples,
+ surf.bankw, surf.bankh,
+ surf.tsplit, surf.mtilea);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
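+
+/*
+ * Illustrative run of the mipmap loop above: a 256x256 2D texture with
+ * llevel = 2 and mslice = 1 is re-checked at 128x128 (level 1) and
+ * 64x64 (level 2); each level's nbx/nby are realigned to palign/halign,
+ * its layer size is accumulated into moffset, and moffset is compared
+ * against the mipmap bo size after every level.
+ */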
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned tmp, i;
+ int r;
+ unsigned buffer_mask = 0;
+
+ /* check streamout */
+ if (track->streamout_dirty && track->vgt_strmout_config) {
+ for (i = 0; i < 4; i++) {
+ if (track->vgt_strmout_config & (1 << i)) {
+ buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (buffer_mask & (1 << i)) {
+ if (track->vgt_strmout_bo[i]) {
+ u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+ (u64)track->vgt_strmout_size[i];
+ if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+ DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ return -EINVAL;
+ }
+ }
+ }
+ track->streamout_dirty = false;
+ }
+
+ if (track->sx_misc_kill_all_prims)
+ return 0;
+
+ /* check that we have a cb for each enabled target
+ */
+ if (track->cb_dirty) {
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* check cb */
+ r = evergreen_cs_track_validate_cb(p, i);
+ if (r) {
+ return r;
+ }
+ }
+ }
+ track->cb_dirty = false;
+ }
+
+ if (track->db_dirty) {
+ /* Check stencil buffer */
+ if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+ G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+ r = evergreen_cs_track_validate_stencil(p);
+ if (r)
+ return r;
+ }
+ /* Check depth buffer */
+ if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ r = evergreen_cs_track_validate_depth(p);
+ if (r)
+ return r;
+ }
+ track->db_dirty = false;
+ }
+
+ return 0;
+}
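+
+/*
+ * CB_TARGET_MASK holds one 4-bit component write mask per render
+ * target, so the cb loop above reads an illustrative mask of 0x00000f0f
+ * as "targets 0 and 2 enabled" and requires a bound cb_color_bo (and a
+ * passing evergreen_cs_track_validate_cb()) for each of them.
+ */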
+
+/**
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p: parser structure holding parsing context.
+ *
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function;
+ * here we just set up the ASIC-specific register tables and call
+ * the common implementation.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ static uint32_t vline_start_end[6] = {
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+ static uint32_t vline_status[6] = {
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ int r;
+
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ r = evergreen_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ return r;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ unsigned reg, i;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ r = evergreen_packet0_check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe.  If the register is not flagged as safe,
+ * this function will test it against a list of registers needing
+ * special handling.
+ */
+static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg;
+ u32 m, i, tmp, *ib;
+ int r;
+
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return 0;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ }
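+ /*
+ * Worked example of the bitmap lookup above: each u32 of the safe
+ * bitmap covers 32 dword registers (128 bytes), so for
+ * reg = 0x28800 (DB_DEPTH_CONTROL) the word index is 0x28800 >> 7 = 1296
+ * and the bit is 1 << ((0x28800 >> 2) & 31) = bit 0.  A clear bit means
+ * the register is unconditionally safe; a set bit sends it to the
+ * switch below.
+ */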
+ ib = p->ib.ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the out
+ * buffer; we would need to understand better how it works in order
+ * to perform a proper security check on it (Jerome)
+ */
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ /* get value to populate the IB, don't remove */
+ /*tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case CAYMAN_DB_EQAA:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
+ case CAYMAN_DB_DEPTH_INFO:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
+ case DB_Z_INFO:
+ track->db_z_info = radeon_get_ib_value(p, idx);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(tile_split) |
+ DB_BANK_WIDTH(bankw) |
+ DB_BANK_HEIGHT(bankh) |
+ DB_MACRO_TILE_ASPECT(mtaspect);
+ }
+ }
+ track->db_dirty = true;
+ break;
+ case DB_STENCIL_INFO:
+ track->db_s_info = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case R_02805C_DB_DEPTH_SLICE:
+ track->db_depth_slice = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case DB_Z_READ_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_read_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_Z_WRITE_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_z_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_z_write_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_STENCIL_READ_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_read_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_read_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_STENCIL_WRITE_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_s_write_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_s_write_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case VGT_STRMOUT_CONFIG:
+ track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_CONFIG:
+ track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_BASE_0:
+ case VGT_STRMOUT_BUFFER_BASE_1:
+ case VGT_STRMOUT_BUFFER_BASE_2:
+ case VGT_STRMOUT_BUFFER_BASE_3:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+ track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->vgt_strmout_bo[tmp] = reloc->robj;
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_SIZE_0:
+ case VGT_STRMOUT_BUFFER_SIZE_1:
+ case VGT_STRMOUT_BUFFER_SIZE_2:
+ case VGT_STRMOUT_BUFFER_SIZE_3:
+ tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+ /* size in register is DWs, convert to bytes */
+ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+ track->streamout_dirty = true;
+ break;
+ case CP_COHER_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case PA_SC_AA_CONFIG:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
+ case CAYMAN_PA_SC_AA_CONFIG:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
+ case CB_COLOR0_VIEW:
+ case CB_COLOR1_VIEW:
+ case CB_COLOR2_VIEW:
+ case CB_COLOR3_VIEW:
+ case CB_COLOR4_VIEW:
+ case CB_COLOR5_VIEW:
+ case CB_COLOR6_VIEW:
+ case CB_COLOR7_VIEW:
+ tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_VIEW:
+ case CB_COLOR9_VIEW:
+ case CB_COLOR10_VIEW:
+ case CB_COLOR11_VIEW:
+ tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR0_INFO:
+ case CB_COLOR1_INFO:
+ case CB_COLOR2_INFO:
+ case CB_COLOR3_INFO:
+ case CB_COLOR4_INFO:
+ case CB_COLOR5_INFO:
+ case CB_COLOR6_INFO:
+ case CB_COLOR7_INFO:
+ tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ }
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_INFO:
+ case CB_COLOR9_INFO:
+ case CB_COLOR10_INFO:
+ case CB_COLOR11_INFO:
+ tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ }
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR0_PITCH:
+ case CB_COLOR1_PITCH:
+ case CB_COLOR2_PITCH:
+ case CB_COLOR3_PITCH:
+ case CB_COLOR4_PITCH:
+ case CB_COLOR5_PITCH:
+ case CB_COLOR6_PITCH:
+ case CB_COLOR7_PITCH:
+ tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_PITCH:
+ case CB_COLOR9_PITCH:
+ case CB_COLOR10_PITCH:
+ case CB_COLOR11_PITCH:
+ tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+ track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR0_SLICE:
+ case CB_COLOR1_SLICE:
+ case CB_COLOR2_SLICE:
+ case CB_COLOR3_SLICE:
+ case CB_COLOR4_SLICE:
+ case CB_COLOR5_SLICE:
+ case CB_COLOR6_SLICE:
+ case CB_COLOR7_SLICE:
+ tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_SLICE:
+ case CB_COLOR9_SLICE:
+ case CB_COLOR10_SLICE:
+ case CB_COLOR11_SLICE:
+ tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+ track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR0_ATTRIB:
+ case CB_COLOR1_ATTRIB:
+ case CB_COLOR2_ATTRIB:
+ case CB_COLOR3_ATTRIB:
+ case CB_COLOR4_ATTRIB:
+ case CB_COLOR5_ATTRIB:
+ case CB_COLOR6_ATTRIB:
+ case CB_COLOR7_ATTRIB:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(tile_split) |
+ CB_BANK_WIDTH(bankw) |
+ CB_BANK_HEIGHT(bankh) |
+ CB_MACRO_TILE_ASPECT(mtaspect);
+ }
+ }
+ tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
+ track->cb_color_attrib[tmp] = ib[idx];
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_ATTRIB:
+ case CB_COLOR9_ATTRIB:
+ case CB_COLOR10_ATTRIB:
+ case CB_COLOR11_ATTRIB:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(tile_split) |
+ CB_BANK_WIDTH(bankw) |
+ CB_BANK_HEIGHT(bankh) |
+ CB_MACRO_TILE_ASPECT(mtaspect);
+ }
+ }
+ tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
+ track->cb_color_attrib[tmp] = ib[idx];
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR0_FMASK:
+ case CB_COLOR1_FMASK:
+ case CB_COLOR2_FMASK:
+ case CB_COLOR3_FMASK:
+ case CB_COLOR4_FMASK:
+ case CB_COLOR5_FMASK:
+ case CB_COLOR6_FMASK:
+ case CB_COLOR7_FMASK:
+ tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
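+		/* surface base addresses are programmed in 256-byte units, hence the >> 8 */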
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_fmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_CMASK:
+ case CB_COLOR1_CMASK:
+ case CB_COLOR2_CMASK:
+ case CB_COLOR3_CMASK:
+ case CB_COLOR4_CMASK:
+ case CB_COLOR5_CMASK:
+ case CB_COLOR6_CMASK:
+ case CB_COLOR7_CMASK:
+ tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_cmask_bo[tmp] = reloc->robj;
+ break;
+ case CB_COLOR0_FMASK_SLICE:
+ case CB_COLOR1_FMASK_SLICE:
+ case CB_COLOR2_FMASK_SLICE:
+ case CB_COLOR3_FMASK_SLICE:
+ case CB_COLOR4_FMASK_SLICE:
+ case CB_COLOR5_FMASK_SLICE:
+ case CB_COLOR6_FMASK_SLICE:
+ case CB_COLOR7_FMASK_SLICE:
+ tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+ track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_CMASK_SLICE:
+ case CB_COLOR1_CMASK_SLICE:
+ case CB_COLOR2_CMASK_SLICE:
+ case CB_COLOR3_CMASK_SLICE:
+ case CB_COLOR4_CMASK_SLICE:
+ case CB_COLOR5_CMASK_SLICE:
+ case CB_COLOR6_CMASK_SLICE:
+ case CB_COLOR7_CMASK_SLICE:
+ tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+ track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_dirty = true;
+ break;
+ case CB_COLOR8_BASE:
+ case CB_COLOR9_BASE:
+ case CB_COLOR10_BASE:
+ case CB_COLOR11_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_dirty = true;
+ break;
+ case DB_HTILE_DATA_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ /* 8x8 only */
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
+ track->db_dirty = true;
+ break;
+ case CB_IMMED0_BASE:
+ case CB_IMMED1_BASE:
+ case CB_IMMED2_BASE:
+ case CB_IMMED3_BASE:
+ case CB_IMMED4_BASE:
+ case CB_IMMED5_BASE:
+ case CB_IMMED6_BASE:
+ case CB_IMMED7_BASE:
+ case CB_IMMED8_BASE:
+ case CB_IMMED9_BASE:
+ case CB_IMMED10_BASE:
+ case CB_IMMED11_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_PGM_START_HS:
+ case SQ_PGM_START_LS:
+ case SQ_CONST_MEM_BASE:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ case SQ_ALU_CONST_CACHE_HS_0:
+ case SQ_ALU_CONST_CACHE_HS_1:
+ case SQ_ALU_CONST_CACHE_HS_2:
+ case SQ_ALU_CONST_CACHE_HS_3:
+ case SQ_ALU_CONST_CACHE_HS_4:
+ case SQ_ALU_CONST_CACHE_HS_5:
+ case SQ_ALU_CONST_CACHE_HS_6:
+ case SQ_ALU_CONST_CACHE_HS_7:
+ case SQ_ALU_CONST_CACHE_HS_8:
+ case SQ_ALU_CONST_CACHE_HS_9:
+ case SQ_ALU_CONST_CACHE_HS_10:
+ case SQ_ALU_CONST_CACHE_HS_11:
+ case SQ_ALU_CONST_CACHE_HS_12:
+ case SQ_ALU_CONST_CACHE_HS_13:
+ case SQ_ALU_CONST_CACHE_HS_14:
+ case SQ_ALU_CONST_CACHE_HS_15:
+ case SQ_ALU_CONST_CACHE_LS_0:
+ case SQ_ALU_CONST_CACHE_LS_1:
+ case SQ_ALU_CONST_CACHE_LS_2:
+ case SQ_ALU_CONST_CACHE_LS_3:
+ case SQ_ALU_CONST_CACHE_LS_4:
+ case SQ_ALU_CONST_CACHE_LS_5:
+ case SQ_ALU_CONST_CACHE_LS_6:
+ case SQ_ALU_CONST_CACHE_LS_7:
+ case SQ_ALU_CONST_CACHE_LS_8:
+ case SQ_ALU_CONST_CACHE_LS_9:
+ case SQ_ALU_CONST_CACHE_LS_10:
+ case SQ_ALU_CONST_CACHE_LS_11:
+ case SQ_ALU_CONST_CACHE_LS_12:
+ case SQ_ALU_CONST_CACHE_LS_13:
+ case SQ_ALU_CONST_CACHE_LS_14:
+ case SQ_ALU_CONST_CACHE_LS_15:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case SX_MEMORY_EXPORT_BASE:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case CAYMAN_SX_SCATTER_EXPORT_BASE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case SX_MISC:
+ track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ u32 last_reg, m, i;
+
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
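+	/*
+	 * The safe-register bitmaps hold one bit per dword register, so each
+	 * 32-bit word covers a 128-byte register range: reg >> 7 picks the
+	 * word and (reg >> 2) & 31 the bit.  A clear bit marks the register
+	 * as safe.
+	 */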
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return true;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return true;
+ }
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+}
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct evergreen_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+ unsigned start_reg, end_reg, reg;
+ int r;
+ u32 idx_value;
+
+ track = (struct evergreen_cs_track *)p->track;
+ ib = p->ib.ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ uint64_t offset;
+
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
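+		/* rebuild the 64-bit GPU address: aligned low bits from the packet plus the top 8 bits from the next dword */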
+ offset = reloc->lobj.gpu_offset +
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
+
+ ib[idx + 0] = offset;
+ ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ break;
+ case PACKET3_CONTEXT_CONTROL:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad CONTEXT_CONTROL\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_CLEAR_STATE:
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_BASE:
+ {
+ uint64_t offset;
+
+ if (pkt->count != 1) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad INDEX_BASE\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ }
+ case PACKET3_DRAW_INDEX:
+ {
+ uint64_t offset;
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ }
+ case PACKET3_DRAW_INDEX_2:
+ {
+ uint64_t offset;
+
+ if (pkt->count != 4) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX_2\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ radeon_get_ib_value(p, idx+1) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ }
+ case PACKET3_DRAW_INDEX_AUTO:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_IMMD:
+ if (pkt->count < 2) {
+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET:
+ if (pkt->count != 2) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_DISPATCH_DIRECT:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DISPATCH_DIRECT\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DISPATCH_INDIRECT:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_WAIT_REG_MEM:
+ if (pkt->count != 5) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ /* bit 4 is reg (0) or mem (1) */
+ if (idx_value & 0x10) {
+ uint64_t offset;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size, info;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
+ size = command & 0x1fffff;
+ info = radeon_get_ib_value(p, idx+1);
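+		/* info bits [30:29] select the src address space, bits [21:20] the dst */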
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dword-aligned count */
+ if (size % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ /* GDS is ok */
+ if (((info & 0x60000000) >> 29) != 1) {
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ if (((info & 0x60000000) >> 29) == 0) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ } else if (((info & 0x60000000) >> 29) != 2) {
+ DRM_ERROR("bad CP DMA SRC_SEL\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ /* GDS is ok */
+ if (((info & 0x00300000) >> 20) != 1) {
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ }
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ if (((info & 0x00300000) >> 20) == 0) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad CP DMA DST_SEL\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
+ case PACKET3_SURFACE_SYNC:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ /* 0xffffffff/0x0 is flush all cache flag */
+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+ radeon_get_ib_value(p, idx + 2) != 0) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_EVENT_WRITE:
+ if (pkt->count != 2 && pkt->count != 0) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ uint64_t offset;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffff8;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_EVENT_WRITE_EOP:
+ {
+ uint64_t offset;
+
+ if (pkt->count != 4) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ break;
+ }
+ case PACKET3_EVENT_WRITE_EOS:
+ {
+ uint64_t offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ break;
+ }
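+	/* the SET_*_REG packets carry a dword offset relative to the start of their register block */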
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_CONTEXT_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_RESOURCE:
+ if (pkt->count % 8) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < (pkt->count / 8); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 toffset, moffset;
+ u32 size, offset, mip_address, tex_dim;
+
+ switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+ case SQ_TEX_VTX_VALID_TEXTURE:
+ /* tex base */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ unsigned bankw, bankh, mtaspect, tile_split;
+
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
+ &bankw, &bankh, &mtaspect,
+ &tile_split);
+ ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
+ ib[idx+1+(i*8)+7] |=
+ TEX_BANK_WIDTH(bankw) |
+ TEX_BANK_HEIGHT(bankh) |
+ MACRO_TILE_ASPECT(mtaspect) |
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
+ }
+ texture = reloc->robj;
+ toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
+ /* tex mip base */
+ tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+ mip_address = ib[idx+1+(i*8)+3];
+
+ if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+ !mip_address &&
+ !radeon_cs_packet_next_is_pkt3_nop(p)) {
+ /* MIP_ADDRESS should point to FMASK for an MSAA texture.
+ * It should be 0 if FMASK is disabled. */
+ moffset = 0;
+ mipmap = NULL;
+ } else {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ return -EINVAL;
+ }
+ moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ }
+
+ r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+ if (r)
+ return r;
+ ib[idx+1+(i*8)+2] += toffset;
+ ib[idx+1+(i*8)+3] += moffset;
+ break;
+ case SQ_TEX_VTX_VALID_BUFFER:
+ {
+ uint64_t offset64;
+ /* vtx base */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+ }
+
+ offset64 = reloc->lobj.gpu_offset + offset;
+ ib[idx+1+(i*8)+0] = offset64;
+ ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+ (upper_32_bits(offset64) & 0xff);
+ break;
+ }
+ case SQ_TEX_VTX_INVALID_TEXTURE:
+ case SQ_TEX_VTX_INVALID_BUFFER:
+ default:
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_ALU_CONST:
+ /* XXX fix me ALU const buffers only */
+ break;
+ case PACKET3_SET_BOOL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+ DRM_ERROR("bad SET_BOOL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_LOOP_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+ DRM_ERROR("bad SET_LOOP_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CTL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+ DRM_ERROR("bad SET_CTL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_SAMPLER:
+ if (pkt->count % 3) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ return -EINVAL;
+ }
+ /* Updating memory at DST_ADDRESS. */
+ if (idx_value & 0x1) {
+ u64 offset;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ /* Reading data from SRC_ADDRESS. */
+ if (((idx_value >> 1) & 0x3) == 2) {
+ u64 offset;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
+ case PACKET3_COPY_DW:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad COPY_DW (invalid count)\n");
+ return -EINVAL;
+ }
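+		/* bit 0: source is memory; bit 1: destination is memory */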
+ if (idx_value & 0x1) {
+ u64 offset;
+ /* SRC is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* SRC is a reg. */
+ reg = radeon_get_ib_value(p, idx+1) << 2;
+ if (!evergreen_is_safe_reg(p, reg, idx+1))
+ return -EINVAL;
+ }
+ if (idx_value & 0x2) {
+ u64 offset;
+ /* DST is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* DST is a reg. */
+ reg = radeon_get_ib_value(p, idx+3) << 2;
+ if (!evergreen_is_safe_reg(p, reg, idx+3))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct evergreen_cs_track *track;
+ u32 tmp;
+ int r;
+
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ evergreen_cs_track_init(track);
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
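+		/* decode pipe count, bank count, group size and row size from tile_config */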
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
+ p->track = track;
+ }
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = evergreen_cs_parse_packet0(p, &pkt);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = evergreen_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
+ return -EINVAL;
+ }
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ kfree(p->track);
+ p->track = NULL;
+ return 0;
+}
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl, updates the GPU
+ * addresses based on the reloc information, and checks
+ * for errors (Evergreen-Cayman).
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, sub_cmd;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx;
+ u64 src_offset, dst_offset, dst2_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Cannot parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
+
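+		/* advance p->idx by the packet length implied by each opcode/sub-opcode */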
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
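+			/* tiled addresses are stored in 256-byte units */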
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 7;
+ break;
+ /* linear */
+ case 0:
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx + 7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+				dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ /* L2T, T2L */
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+				DRM_ERROR("T2T Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+			dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+				 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
+
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+ /* context regs are fine */
+ if (reg >= 0x28000)
+ return true;
+
+ /* check config regs */
+ switch (reg) {
+ case WAIT_UNTIL:
+ case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case CP_COHER_CNTL:
+ case CP_COHER_SIZE:
+ case VGT_VTX_VECT_EJECT_REG:
+ case VGT_CACHE_INVALIDATION:
+ case VGT_GS_VERTEX_REUSE:
+ case VGT_PRIMITIVE_TYPE:
+ case VGT_INDEX_TYPE:
+ case VGT_NUM_INDICES:
+ case VGT_NUM_INSTANCES:
+ case VGT_COMPUTE_DIM_X:
+ case VGT_COMPUTE_DIM_Y:
+ case VGT_COMPUTE_DIM_Z:
+ case VGT_COMPUTE_START_X:
+ case VGT_COMPUTE_START_Y:
+ case VGT_COMPUTE_START_Z:
+ case VGT_COMPUTE_INDEX:
+ case VGT_COMPUTE_THREAD_GROUP_SIZE:
+ case VGT_HS_OFFCHIP_PARAM:
+ case PA_CL_ENHANCE:
+ case PA_SU_LINE_STIPPLE_VALUE:
+ case PA_SC_LINE_STIPPLE_STATE:
+ case PA_SC_ENHANCE:
+ case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+ case SQ_DYN_GPR_SIMD_LOCK_EN:
+ case SQ_CONFIG:
+ case SQ_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+ case SQ_CONST_MEM_BASE:
+ case SQ_STATIC_THREAD_MGMT_1:
+ case SQ_STATIC_THREAD_MGMT_2:
+ case SQ_STATIC_THREAD_MGMT_3:
+ case SPI_CONFIG_CNTL:
+ case SPI_CONFIG_CNTL_1:
+ case TA_CNTL_AUX:
+ case DB_DEBUG:
+ case DB_DEBUG2:
+ case DB_DEBUG3:
+ case DB_DEBUG4:
+ case DB_WATERMARKS:
+ case TD_PS_BORDER_COLOR_INDEX:
+ case TD_PS_BORDER_COLOR_RED:
+ case TD_PS_BORDER_COLOR_GREEN:
+ case TD_PS_BORDER_COLOR_BLUE:
+ case TD_PS_BORDER_COLOR_ALPHA:
+ case TD_VS_BORDER_COLOR_INDEX:
+ case TD_VS_BORDER_COLOR_RED:
+ case TD_VS_BORDER_COLOR_GREEN:
+ case TD_VS_BORDER_COLOR_BLUE:
+ case TD_VS_BORDER_COLOR_ALPHA:
+ case TD_GS_BORDER_COLOR_INDEX:
+ case TD_GS_BORDER_COLOR_RED:
+ case TD_GS_BORDER_COLOR_GREEN:
+ case TD_GS_BORDER_COLOR_BLUE:
+ case TD_GS_BORDER_COLOR_ALPHA:
+ case TD_HS_BORDER_COLOR_INDEX:
+ case TD_HS_BORDER_COLOR_RED:
+ case TD_HS_BORDER_COLOR_GREEN:
+ case TD_HS_BORDER_COLOR_BLUE:
+ case TD_HS_BORDER_COLOR_ALPHA:
+ case TD_LS_BORDER_COLOR_INDEX:
+ case TD_LS_BORDER_COLOR_RED:
+ case TD_LS_BORDER_COLOR_GREEN:
+ case TD_LS_BORDER_COLOR_BLUE:
+ case TD_LS_BORDER_COLOR_ALPHA:
+ case TD_CS_BORDER_COLOR_INDEX:
+ case TD_CS_BORDER_COLOR_RED:
+ case TD_CS_BORDER_COLOR_GREEN:
+ case TD_CS_BORDER_COLOR_BLUE:
+ case TD_CS_BORDER_COLOR_ALPHA:
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+ case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+ return true;
+ default:
+ DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+ return false;
+ }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, end_reg, reg, i;
+ u32 command, info;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_INDEX_BUFFER_SIZE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_MODE_CONTROL:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ case PACKET3_INDEX_BASE:
+ case PACKET3_DRAW_INDEX_2:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_DRAW_INDEX_OFFSET:
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_DRAW_INDEX:
+ case PACKET3_DRAW_INDEX_AUTO:
+ case PACKET3_DRAW_INDEX_IMMD:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+ case PACKET3_MPEG_INDEX:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_BOOL_CONST:
+ case PACKET3_SET_LOOP_CONST:
+ case PACKET3_SET_RESOURCE:
+ case PACKET3_SET_SAMPLER:
+ case PACKET3_SET_CTL_CONST:
+ case PACKET3_SET_RESOURCE_OFFSET:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_RESOURCE_INDIRECT:
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CP_DMA:
+ command = ib[idx + 4];
+ info = ib[idx + 1];
+ if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+ (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+ ((((info & 0x00300000) >> 20) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+ ((((info & 0x60000000) >> 29) == 0) &&
+ (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dword-aligned count */
+ if ((command & 0x1fffff) % 4) {
+ DRM_ERROR("CP DMA command requires dw count alignment\n");
+ return -EINVAL;
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int ret = 0;
+ u32 idx = 0;
+ struct radeon_cs_packet pkt;
+
+ do {
+ pkt.idx = idx;
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.one_reg_wr = 0;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ dev_err(rdev->dev, "Packet0 not allowed!\n");
+ ret = -EINVAL;
+ break;
+ case RADEON_PACKET_TYPE2:
+ idx += 1;
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+ idx += pkt.count + 2;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ } while (idx < ib->length_dw);
+
+ return ret;
+}
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib: radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors (Cayman-SI).
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ u32 idx = 0;
+ u32 header, cmd, count, sub_cmd;
+
+ do {
+ header = ib->ptr[idx];
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
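+ /*
+ * DMA packet header layout (see DMA_PACKET() in evergreend.h):
+ * bits 31:28 = cmd, bits 27:20 = sub_cmd, bits 19:0 = count.
+ */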
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
+ idx += count + 7;
+ break;
+ /* linear */
+ case 0:
+ idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (idx < ib->length_dw);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 0000000..bb9ea36
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ * Rafał Miłecki
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
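+/*
+ * Worked example (recommended values from the HDMI spec, not computed
+ * here): for 48 kHz audio on a 74.25 MHz TMDS clock, N = 6144 and
+ * CTS = (74250000 * 6144) / (128 * 48000) = 74250.
+ */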
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
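+ /*
+ * Example (assuming drm_edid_to_sad() keeps the raw CEA fields): a
+ * SAD advertising 8-channel PCM at all seven CEA rates is written
+ * to DESCRIPTOR0 as MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x7f),
+ * since the channel count is stored minus one.
+ */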
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+/*
+ * build an HDMI video info frame
+ */
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
+ uint8_t *header = buffer;
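+ /*
+ * Per the linux/hdmi.h packing, the buffer is laid out as [type,
+ * version, length, checksum, 13 payload bytes], so frame points at
+ * the checksum byte, which lands in bits 7:0 of AFMT_AVI_INFO0; the
+ * version byte goes into bits 25:24 of AFMT_AVI_INFO3 below.
+ */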
+
+ WREG32(AFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(AFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(AFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
+static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ u32 base_rate = 24000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
+ * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
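+ /*
+ * Worked example (mode->clock is in kHz): for a 148.5 MHz pixel
+ * clock, PHASE = 24000 * 100 = 2400000 and
+ * MODULE = 148500 * 100 = 14850000, i.e. exactly 24 / 148.5.
+ */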
+ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ uint32_t offset;
+ ssize_t err;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ evergreen_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
+
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(AFMT_60958_0 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+ WREG32(AFMT_60958_1 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+ WREG32(AFMT_60958_2 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+ AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+ AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+ AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+ AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+ AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+ /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+ AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+ /* fglrx sets 0x40 in 0x5f80 here */
+ evergreen_hdmi_write_sad_regs(encoder);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+ WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for AVI info values to be updated */
+
+ WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
+
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 0000000..881aba2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
+#define EVERGREEN_D3VGA_CONTROL 0x3e0
+#define EVERGREEN_D4VGA_CONTROL 0x3e4
+#define EVERGREEN_D5VGA_CONTROL 0x3e8
+#define EVERGREEN_D6VGA_CONTROL 0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL 0x414
+#define EVERGREEN_P2PLL_SS_CNTL 0x454
+# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE 0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
+
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE 0x6800
+#define EVERGREEN_GRPH_CONTROL 0x6804
+# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_DEPTH_8BPP 0
+# define EVERGREEN_GRPH_DEPTH_16BPP 1
+# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
+# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+# define EVERGREEN_GRPH_FORMAT_ARGB565 1
+# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+# define EVERGREEN_GRPH_FORMAT_AI88 3
+# define EVERGREEN_GRPH_FORMAT_MONO16 4
+# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+# define EVERGREEN_GRPH_FORMAT_RGB111110 6
+# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
+# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
+# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
+# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_RED_SEL_R 0
+# define EVERGREEN_GRPH_RED_SEL_G 1
+# define EVERGREEN_GRPH_RED_SEL_B 2
+# define EVERGREEN_GRPH_RED_SEL_A 3
+# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
+# define EVERGREEN_GRPH_GREEN_SEL_G 0
+# define EVERGREEN_GRPH_GREEN_SEL_B 1
+# define EVERGREEN_GRPH_GREEN_SEL_A 2
+# define EVERGREEN_GRPH_GREEN_SEL_R 3
+# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
+# define EVERGREEN_GRPH_BLUE_SEL_B 0
+# define EVERGREEN_GRPH_BLUE_SEL_A 1
+# define EVERGREEN_GRPH_BLUE_SEL_R 2
+# define EVERGREEN_GRPH_BLUE_SEL_G 3
+# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
+# define EVERGREEN_GRPH_ALPHA_SEL_A 0
+# define EVERGREEN_GRPH_ALPHA_SEL_R 1
+# define EVERGREEN_GRPH_ALPHA_SEL_G 2
+# define EVERGREEN_GRPH_ALPHA_SEL_B 3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
+# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
+# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+#define EVERGREEN_GRPH_PITCH 0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
+#define EVERGREEN_GRPH_X_START 0x682c
+#define EVERGREEN_GRPH_Y_START 0x6830
+#define EVERGREEN_GRPH_X_END 0x6834
+#define EVERGREEN_GRPH_Y_END 0x6838
+#define EVERGREEN_GRPH_UPDATE 0x6844
+# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
+# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x6998
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
+#define EVERGREEN_CUR_POSITION 0x69a8
+#define EVERGREEN_CUR_HOT_SPOT 0x69ac
+#define EVERGREEN_CUR_COLOR1 0x69b0
+#define EVERGREEN_CUR_COLOR2 0x69b4
+#define EVERGREEN_CUR_UPDATE 0x69b8
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
+#define EVERGREEN_DC_LUT_CONTROL 0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
+
+#define EVERGREEN_DATA_FORMAT 0x6b00
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+#define EVERGREEN_VLINE_START_END 0x6b08
+#define EVERGREEN_VLINE_STATUS 0x6bb8
+# define EVERGREEN_VLINE_STAT (1 << 12)
+
+#define EVERGREEN_VIEWPORT_START 0x6d70
+#define EVERGREEN_VIEWPORT_SIZE 0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
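+/*
+ * The per-CRTC registers below are given at their CRTC0 addresses; add
+ * the matching offset for another instance, e.g. EVERGREEN_CRTC_CONTROL
+ * + EVERGREEN_CRTC1_REGISTER_OFFSET = 0x6e70 + 0xc00 = 0x7a70.
+ */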
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
+#define EVERGREEN_CRTC_CONTROL 0x6e70
+# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
+# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+#define EVERGREEN_CRTC_STATUS 0x6e8c
+# define EVERGREEN_CRTC_V_BLANK (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
+
+#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE 0x7030
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 0000000..150e318
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,2100 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_VCO_MODE_MASK 0x00000200
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x854
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM 0x79c
+# define SSEN_MASK 0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL 0x610
+# define DCLK_DIVIDER_MASK 0x7f
+# define DCLK_DIR_CNTL_EN (1 << 8)
+#define CG_DCLK_STATUS 0x614
+# define DCLK_STATUS (1 << 0)
+#define CG_VCLK_CNTL 0x618
+#define CG_VCLK_STATUS 0x61c
+#define CG_SCRATCH1 0x820
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x0000000f
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+#define HDP_MISC_CNTL 0x2F4C
+#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define BIF_FB_EN 0x5490
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
+#define CP_STRMOUT_CNTL 0x84FC
+
+#define CP_COHER_CNTL 0x85F0
+#define CP_COHER_SIZE 0x85F4
+#define CP_COHER_BASE 0x85F8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define RB_RPTR_SWAP(x) ((x) << 0)
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
+#define CP_DEBUG 0xC1FC
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO0_LOAD 0x05b8
+#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DCCG_AUDIO_DTO1_LOAD 0x05c8
+#define DCCG_AUDIO_DTO1_CNTL 0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL 0x7030
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
+# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
+# define HDMI_24BIT_DEEP_COLOR 0
+# define HDMI_30BIT_DEEP_COLOR 1
+# define HDMI_36BIT_DEEP_COLOR 2
+#define HDMI_STATUS 0x7034
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7038
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x703c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
+# define HDMI_ACR_X1 1
+# define HDMI_ACR_X2 2
+# define HDMI_ACR_X4 4
+# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL 0x7040
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7044
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7048
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x704c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7058
+# define HDMI_GC_AVMUTE (1 << 0)
+# define HDMI_GC_AVMUTE_CONT (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7084
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7088
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
+# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x708c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7090
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7094
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7098
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x709c
+#define AFMT_GENERIC0_0 0x70a0
+#define AFMT_GENERIC0_1 0x70a4
+#define AFMT_GENERIC0_2 0x70a8
+#define AFMT_GENERIC0_3 0x70ac
+#define AFMT_GENERIC0_4 0x70b0
+#define AFMT_GENERIC0_5 0x70b4
+#define AFMT_GENERIC0_6 0x70b8
+#define AFMT_GENERIC1_HDR 0x70bc
+#define AFMT_GENERIC1_0 0x70c0
+#define AFMT_GENERIC1_1 0x70c4
+#define AFMT_GENERIC1_2 0x70c8
+#define AFMT_GENERIC1_3 0x70cc
+#define AFMT_GENERIC1_4 0x70d0
+#define AFMT_GENERIC1_5 0x70d4
+#define AFMT_GENERIC1_6 0x70d8
+#define HDMI_ACR_32_0 0x70dc
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x70e0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x70e4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x70e8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x70ec
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x70f0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x70f4
+#define HDMI_ACR_STATUS_1 0x70f8
+#define AFMT_AUDIO_INFO0 0x70fc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1 0x7100
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
+#define AFMT_60958_0 0x7104
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x7108
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x710c
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x7110
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x7114
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x7118
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x711c
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x7120
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7128
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x712c
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7130
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x7134
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7138
+
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x5e78
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+/* evergreen */
+#define CG_THERMAL_CTRL 0x72c
+#define TOFFSET_MASK 0x00003FE0
+#define TOFFSET_SHIFT 5
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x07FF0000
+#define ASIC_T_SHIFT 16
+#define CG_TS0_STATUS 0x760
+#define TS0_ADC_DOUT_MASK 0x000003FF
+#define TS0_ADC_DOUT_SHIFT 0
+/* APU */
+#define CG_THERMAL_STATUS 0x678
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define BLACKOUT_MODE_MASK 0x00000007
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define FUS_MC_ARB_RAMCFG 0x2768
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_FUS_VM_FB_OFFSET 0x2898
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
+
+#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
+#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
+#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664
+
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_ENHANCE 0x8BF0
+#define PA_SC_AA_CONFIG 0x28C04
+#define MSAA_NUM_SAMPLES_SHIFT 0
+#define MSAA_NUM_SAMPLES_MASK 0x3
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_SAR_CTL0 0xA008
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
+#define SQ_STATIC_THREAD_MGMT_1 0x8E20
+#define SQ_STATIC_THREAD_MGMT_2 0x8E24
+#define SQ_STATIC_THREAD_MGMT_3 0x8E28
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MEMORY_EXPORT_BASE 0x9010
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+/* display watermarks */
+#define DC_LB_MEMORY_SPLIT 0x6b0c
+#define PRIORITY_A_CNT 0x6b18
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x6b1c
+#define PIPE0_ARBITRATION_CONTROL3 0x0bf0
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define PIPE0_LATENCY_CONTROL 0x0bf4
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+/* ASYNC DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_TILING_CONFIG 0xD0B8
+
+#define CAYMAN_DMA1_CNTL 0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
+ (((sub_cmd) & 0xFF) << 20) |\
+ (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
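+
+/* Editor's note: an illustrative sketch, not part of the original patch.
+ * DMA_PACKET() packs command, sub-command and dword count into a single
+ * header dword; the GET_DMA_* macros recover the fields:
+ *
+ *	u32 hdr = DMA_PACKET(DMA_PACKET_FENCE, 0, 1);
+ *	// GET_DMA_CMD(hdr) == 0x6, GET_DMA_SUB_CMD(hdr) == 0,
+ *	// GET_DMA_COUNT(hdr) == 1
+ */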
+
+/* PCIE link registers */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
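+
+/* Editor's note: an illustrative sketch, not part of the original patch.
+ * A type-3 packet is one header dword built by PACKET3() followed by a
+ * payload; e.g. writing a single context register through the ring
+ * (radeon_ring_write() is the driver's ring helper, and the offset
+ * dword is relative to PACKET3_SET_CONTEXT_REG_START, in dwords):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ *	radeon_ring_write(ring, (CB_TARGET_MASK -
+ *				 PACKET3_SET_CONTEXT_REG_START) >> 2);
+ *	radeon_ring_write(ring, 0x0000000f);	// register value
+ */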
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
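+
+/* Editor's note: an illustrative sketch, not part of the original patch.
+ * Following the six-dword layout documented above, a memory-to-memory
+ * copy via CP DMA could be emitted as below; src, dst and bytes are
+ * hypothetical 64-bit GPU addresses and a byte count, and CP_SYNC
+ * serializes the copy against the CP:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *	radeon_ring_write(ring, lower_32_bits(src));		// SRC_ADDR_LO
+ *	radeon_ring_write(ring, PACKET3_CP_DMA_CP_SYNC |
+ *				(upper_32_bits(src) & 0xff));	// SRC_ADDR_HI
+ *	radeon_ring_write(ring, lower_32_bits(dst));		// DST_ADDR_LO
+ *	radeon_ring_write(ring, upper_32_bits(dst) & 0xff);	// DST_ADDR_HI
+ *	radeon_ring_write(ring, bytes & 0x1fffff);		// BYTE_COUNT
+ */
+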
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_VC_ACTION_ENA (1 << 24)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_RB_OFFSET 0x4B
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
+#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
+#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
+#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
+#define SQ_TEX_VTX_VALID_BUFFER 0x3
+
+#define VGT_VTX_VECT_EJECT_REG 0x88b0
+
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SQ_ESGS_RING_BASE 0x8c40
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_BASE 0x8c48
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_BASE 0x8e10
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define VGT_TF_RING_SIZE 0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE 0x28900
+#define SQ_GSVS_RING_ITEMSIZE 0x28904
+#define SQ_ESTMP_RING_ITEMSIZE 0x28908
+#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE 0x28910
+#define SQ_PSTMP_RING_ITEMSIZE 0x28914
+#define SQ_LSTMP_RING_ITEMSIZE 0x28830
+#define SQ_HSTMP_RING_ITEMSIZE 0x28834
+
+#define SQ_GS_VERT_ITEMSIZE 0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1 0x28920
+#define SQ_GS_VERT_ITEMSIZE_2 0x28924
+#define SQ_GS_VERT_ITEMSIZE_3 0x28928
+#define SQ_GSVS_RING_OFFSET_1 0x2892c
+#define SQ_GSVS_RING_OFFSET_2 0x28930
+#define SQ_GSVS_RING_OFFSET_3 0x28934
+
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80
+
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
+
+#define PA_SC_SCREEN_SCISSOR_TL 0x28030
+#define PA_SC_GENERIC_SCISSOR_TL 0x28240
+#define PA_SC_WINDOW_SCISSOR_TL 0x28204
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_INDEX_TYPE 0x895C
+
+#define VGT_NUM_INDICES 0x8970
+
+#define VGT_COMPUTE_DIM_X 0x8990
+#define VGT_COMPUTE_DIM_Y 0x8994
+#define VGT_COMPUTE_DIM_Z 0x8998
+#define VGT_COMPUTE_START_X 0x899C
+#define VGT_COMPUTE_START_Y 0x89A0
+#define VGT_COMPUTE_START_Z 0x89A4
+#define VGT_COMPUTE_INDEX 0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
+#define VGT_HS_OFFCHIP_PARAM 0x89B0
+
+#define DB_DEBUG 0x9830
+#define DB_DEBUG2 0x9834
+#define DB_DEBUG3 0x9838
+#define DB_DEBUG4 0x983C
+#define DB_WATERMARKS 0x9854
+#define DB_DEPTH_CONTROL 0x28800
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define V_028800_STENCILFUNC_NEVER 0x00000000
+#define V_028800_STENCILFUNC_LESS 0x00000001
+#define V_028800_STENCILFUNC_EQUAL 0x00000002
+#define V_028800_STENCILFUNC_LEQUAL 0x00000003
+#define V_028800_STENCILFUNC_GREATER 0x00000004
+#define V_028800_STENCILFUNC_NOTEQUAL 0x00000005
+#define V_028800_STENCILFUNC_GEQUAL 0x00000006
+#define V_028800_STENCILFUNC_ALWAYS 0x00000007
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define V_028800_STENCIL_KEEP 0x00000000
+#define V_028800_STENCIL_ZERO 0x00000001
+#define V_028800_STENCIL_REPLACE 0x00000002
+#define V_028800_STENCIL_INCR 0x00000003
+#define V_028800_STENCIL_DECR 0x00000004
+#define V_028800_STENCIL_INVERT 0x00000005
+#define V_028800_STENCIL_INCR_WRAP 0x00000006
+#define V_028800_STENCIL_DECR_WRAP 0x00000007
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
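+
+/* Editor's note: an illustrative sketch, not part of the original patch.
+ * The S_/G_/C_ triplets follow the usual radeon register conventions:
+ * S_ shifts a value into its field, G_ extracts it, and C_ is the
+ * AND-mask that clears it, so a read-modify-write of one field is:
+ *
+ *	db_depth_control &= C_028800_STENCILFUNC;
+ *	db_depth_control |= S_028800_STENCILFUNC(V_028800_STENCILFUNC_ALWAYS);
+ *
+ * (Context registers such as this one are normally programmed through
+ * the command stream rather than by direct MMIO.)
+ */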
+#define DB_DEPTH_VIEW 0x28008
+#define R_028008_DB_DEPTH_VIEW 0x00028008
+#define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028008_SLICE_START 0xFFFFF800
+#define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028008_SLICE_MAX 0xFF001FFF
+#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28abc
+#define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028ABC_HTILE_WIDTH 0xFFFFFFFE
+#define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1)
+#define DB_Z_INFO 0x28040
+# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
+# define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
+#define R_028040_DB_Z_INFO 0x028040
+#define S_028040_FORMAT(x) (((x) & 0x3) << 0)
+#define G_028040_FORMAT(x) (((x) >> 0) & 0x3)
+#define C_028040_FORMAT 0xFFFFFFFC
+#define V_028040_Z_INVALID 0x00000000
+#define V_028040_Z_16 0x00000001
+#define V_028040_Z_24 0x00000002
+#define V_028040_Z_32_FLOAT 0x00000003
+#define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4)
+#define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF)
+#define C_028040_ARRAY_MODE 0xFFFFFF0F
+#define S_028040_READ_SIZE(x) (((x) & 0x1) << 28)
+#define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1)
+#define C_028040_READ_SIZE 0xEFFFFFFF
+#define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29)
+#define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1)
+#define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF
+#define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028040_ZRANGE_PRECISION 0x7FFFFFFF
+#define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8)
+#define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7)
+#define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12)
+#define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3)
+#define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16)
+#define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3)
+#define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20)
+#define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3)
+#define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
+#define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3)
+#define DB_STENCIL_INFO 0x28044
+#define R_028044_DB_STENCIL_INFO 0x028044
+#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
+#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
+#define C_028044_FORMAT 0xFFFFFFFE
+#define V_028044_STENCIL_INVALID 0
+#define V_028044_STENCIL_8 1
+#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
+#define DB_Z_READ_BASE 0x28048
+#define DB_STENCIL_READ_BASE 0x2804c
+#define DB_Z_WRITE_BASE 0x28050
+#define DB_STENCIL_WRITE_BASE 0x28054
+#define DB_DEPTH_SIZE 0x28058
+#define R_028058_DB_DEPTH_SIZE 0x028058
+#define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0)
+#define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF)
+#define C_028058_PITCH_TILE_MAX 0xFFFFF800
+#define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11)
+#define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF)
+#define C_028058_HEIGHT_TILE_MAX 0xFFC007FF
+#define R_02805C_DB_DEPTH_SLICE 0x02805C
+#define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0)
+#define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF)
+#define C_02805C_SLICE_TILE_MAX 0xFFC00000
+
+#define SQ_PGM_START_PS 0x28840
+#define SQ_PGM_START_VS 0x2885c
+#define SQ_PGM_START_GS 0x28874
+#define SQ_PGM_START_ES 0x2888c
+#define SQ_PGM_START_FS 0x288a4
+#define SQ_PGM_START_HS 0x288b8
+#define SQ_PGM_START_LS 0x288d0
+
+#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
+#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
+#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
+#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
+#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
+#define VGT_STRMOUT_CONFIG 0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
+
+#define CB_TARGET_MASK 0x28238
+#define CB_SHADER_MASK 0x2823c
+
+#define GDS_ADDR_BASE 0x28720
+
+#define CB_IMMED0_BASE 0x28b9c
+#define CB_IMMED1_BASE 0x28ba0
+#define CB_IMMED2_BASE 0x28ba4
+#define CB_IMMED3_BASE 0x28ba8
+#define CB_IMMED4_BASE 0x28bac
+#define CB_IMMED5_BASE 0x28bb0
+#define CB_IMMED6_BASE 0x28bb4
+#define CB_IMMED7_BASE 0x28bb8
+#define CB_IMMED8_BASE 0x28bbc
+#define CB_IMMED9_BASE 0x28bc0
+#define CB_IMMED10_BASE 0x28bc4
+#define CB_IMMED11_BASE 0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define CB_COLOR0_BASE 0x28c60
+#define CB_COLOR0_PITCH 0x28c64
+#define CB_COLOR0_SLICE 0x28c68
+#define CB_COLOR0_VIEW 0x28c6c
+#define R_028C6C_CB_COLOR0_VIEW 0x00028C6C
+#define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028C6C_SLICE_START 0xFFFFF800
+#define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028C6C_SLICE_MAX 0xFF001FFF
+#define R_028C70_CB_COLOR0_INFO 0x028C70
+#define S_028C70_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_028C70_ENDIAN 0xFFFFFFFC
+#define S_028C70_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_028C70_FORMAT 0xFFFFFF03
+#define V_028C70_COLOR_INVALID 0x00000000
+#define V_028C70_COLOR_8 0x00000001
+#define V_028C70_COLOR_4_4 0x00000002
+#define V_028C70_COLOR_3_3_2 0x00000003
+#define V_028C70_COLOR_16 0x00000005
+#define V_028C70_COLOR_16_FLOAT 0x00000006
+#define V_028C70_COLOR_8_8 0x00000007
+#define V_028C70_COLOR_5_6_5 0x00000008
+#define V_028C70_COLOR_6_5_5 0x00000009
+#define V_028C70_COLOR_1_5_5_5 0x0000000A
+#define V_028C70_COLOR_4_4_4_4 0x0000000B
+#define V_028C70_COLOR_5_5_5_1 0x0000000C
+#define V_028C70_COLOR_32 0x0000000D
+#define V_028C70_COLOR_32_FLOAT 0x0000000E
+#define V_028C70_COLOR_16_16 0x0000000F
+#define V_028C70_COLOR_16_16_FLOAT 0x00000010
+#define V_028C70_COLOR_8_24 0x00000011
+#define V_028C70_COLOR_8_24_FLOAT 0x00000012
+#define V_028C70_COLOR_24_8 0x00000013
+#define V_028C70_COLOR_24_8_FLOAT 0x00000014
+#define V_028C70_COLOR_10_11_11 0x00000015
+#define V_028C70_COLOR_10_11_11_FLOAT 0x00000016
+#define V_028C70_COLOR_11_11_10 0x00000017
+#define V_028C70_COLOR_11_11_10_FLOAT 0x00000018
+#define V_028C70_COLOR_2_10_10_10 0x00000019
+#define V_028C70_COLOR_8_8_8_8 0x0000001A
+#define V_028C70_COLOR_10_10_10_2 0x0000001B
+#define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_028C70_COLOR_32_32 0x0000001D
+#define V_028C70_COLOR_32_32_FLOAT 0x0000001E
+#define V_028C70_COLOR_16_16_16_16 0x0000001F
+#define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_028C70_COLOR_32_32_32_32 0x00000022
+#define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_028C70_COLOR_32_32_32_FLOAT 0x00000030
+#define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_028C70_ARRAY_MODE 0xFFFFF0FF
+#define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_028C70_NUMBER_TYPE 0xFFFF8FFF
+#define V_028C70_NUMBER_UNORM 0x00000000
+#define V_028C70_NUMBER_SNORM 0x00000001
+#define V_028C70_NUMBER_USCALED 0x00000002
+#define V_028C70_NUMBER_SSCALED 0x00000003
+#define V_028C70_NUMBER_UINT 0x00000004
+#define V_028C70_NUMBER_SINT 0x00000005
+#define V_028C70_NUMBER_SRGB 0x00000006
+#define V_028C70_NUMBER_FLOAT 0x00000007
+#define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15)
+#define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3)
+#define C_028C70_COMP_SWAP 0xFFFE7FFF
+#define V_028C70_SWAP_STD 0x00000000
+#define V_028C70_SWAP_ALT 0x00000001
+#define V_028C70_SWAP_STD_REV 0x00000002
+#define V_028C70_SWAP_ALT_REV 0x00000003
+#define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
+#define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1)
+#define C_028C70_FAST_CLEAR 0xFFFDFFFF
+#define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18)
+#define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3)
+#define C_028C70_COMPRESSION 0xFFF3FFFF
+#define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19)
+#define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1)
+#define C_028C70_BLEND_CLAMP 0xFFF7FFFF
+#define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20)
+#define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1)
+#define C_028C70_BLEND_BYPASS 0xFFEFFFFF
+#define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21)
+#define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1)
+#define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF
+#define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22)
+#define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1)
+#define C_028C70_ROUND_MODE 0xFFBFFFFF
+#define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23)
+#define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1)
+#define C_028C70_TILE_COMPACT 0xFF7FFFFF
+#define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24)
+#define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3)
+#define C_028C70_SOURCE_FORMAT 0xFCFFFFFF
+#define V_028C70_EXPORT_4C_32BPC 0x0
+#define V_028C70_EXPORT_4C_16BPC 0x1
+#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */
+#define S_028C70_RAT(x) (((x) & 0x1) << 26)
+#define G_028C70_RAT(x) (((x) >> 26) & 0x1)
+#define C_028C70_RAT 0xFBFFFFFF
+#define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27)
+#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7)
+#define C_028C70_RESOURCE_TYPE 0xC7FFFFFF
+
+#define CB_COLOR0_INFO 0x28c70
+# define CB_FORMAT(x) ((x) << 2)
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+# define CB_SOURCE_FORMAT(x) ((x) << 24)
+# define CB_SF_EXPORT_FULL 0
+# define CB_SF_EXPORT_NORM 1
+#define R_028C74_CB_COLOR0_ATTRIB 0x028C74
+#define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4)
+#define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1)
+#define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF
+#define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5)
+#define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf)
+#define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10)
+#define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3)
+#define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13)
+#define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3)
+#define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+#define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3)
+#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
+#define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3)
+#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
+# define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
+#define CB_COLOR0_DIM 0x28c78
+/* only CB0-7 blocks have these regs */
+#define CB_COLOR0_CMASK 0x28c7c
+#define CB_COLOR0_CMASK_SLICE 0x28c80
+#define CB_COLOR0_FMASK 0x28c84
+#define CB_COLOR0_FMASK_SLICE 0x28c88
+#define CB_COLOR0_CLEAR_WORD0 0x28c8c
+#define CB_COLOR0_CLEAR_WORD1 0x28c90
+#define CB_COLOR0_CLEAR_WORD2 0x28c94
+#define CB_COLOR0_CLEAR_WORD3 0x28c98
+
+#define CB_COLOR1_BASE 0x28c9c
+#define CB_COLOR2_BASE 0x28cd8
+#define CB_COLOR3_BASE 0x28d14
+#define CB_COLOR4_BASE 0x28d50
+#define CB_COLOR5_BASE 0x28d8c
+#define CB_COLOR6_BASE 0x28dc8
+#define CB_COLOR7_BASE 0x28e04
+#define CB_COLOR8_BASE 0x28e40
+#define CB_COLOR9_BASE 0x28e5c
+#define CB_COLOR10_BASE 0x28e78
+#define CB_COLOR11_BASE 0x28e94
+
+#define CB_COLOR1_PITCH 0x28ca0
+#define CB_COLOR2_PITCH 0x28cdc
+#define CB_COLOR3_PITCH 0x28d18
+#define CB_COLOR4_PITCH 0x28d54
+#define CB_COLOR5_PITCH 0x28d90
+#define CB_COLOR6_PITCH 0x28dcc
+#define CB_COLOR7_PITCH 0x28e08
+#define CB_COLOR8_PITCH 0x28e44
+#define CB_COLOR9_PITCH 0x28e60
+#define CB_COLOR10_PITCH 0x28e7c
+#define CB_COLOR11_PITCH 0x28e98
+
+#define CB_COLOR1_SLICE 0x28ca4
+#define CB_COLOR2_SLICE 0x28ce0
+#define CB_COLOR3_SLICE 0x28d1c
+#define CB_COLOR4_SLICE 0x28d58
+#define CB_COLOR5_SLICE 0x28d94
+#define CB_COLOR6_SLICE 0x28dd0
+#define CB_COLOR7_SLICE 0x28e0c
+#define CB_COLOR8_SLICE 0x28e48
+#define CB_COLOR9_SLICE 0x28e64
+#define CB_COLOR10_SLICE 0x28e80
+#define CB_COLOR11_SLICE 0x28e9c
+
+#define CB_COLOR1_VIEW 0x28ca8
+#define CB_COLOR2_VIEW 0x28ce4
+#define CB_COLOR3_VIEW 0x28d20
+#define CB_COLOR4_VIEW 0x28d5c
+#define CB_COLOR5_VIEW 0x28d98
+#define CB_COLOR6_VIEW 0x28dd4
+#define CB_COLOR7_VIEW 0x28e10
+#define CB_COLOR8_VIEW 0x28e4c
+#define CB_COLOR9_VIEW 0x28e68
+#define CB_COLOR10_VIEW 0x28e84
+#define CB_COLOR11_VIEW 0x28ea0
+
+#define CB_COLOR1_INFO 0x28cac
+#define CB_COLOR2_INFO 0x28ce8
+#define CB_COLOR3_INFO 0x28d24
+#define CB_COLOR4_INFO 0x28d60
+#define CB_COLOR5_INFO 0x28d9c
+#define CB_COLOR6_INFO 0x28dd8
+#define CB_COLOR7_INFO 0x28e14
+#define CB_COLOR8_INFO 0x28e50
+#define CB_COLOR9_INFO 0x28e6c
+#define CB_COLOR10_INFO 0x28e88
+#define CB_COLOR11_INFO 0x28ea4
+
+#define CB_COLOR1_ATTRIB 0x28cb0
+#define CB_COLOR2_ATTRIB 0x28cec
+#define CB_COLOR3_ATTRIB 0x28d28
+#define CB_COLOR4_ATTRIB 0x28d64
+#define CB_COLOR5_ATTRIB 0x28da0
+#define CB_COLOR6_ATTRIB 0x28ddc
+#define CB_COLOR7_ATTRIB 0x28e18
+#define CB_COLOR8_ATTRIB 0x28e54
+#define CB_COLOR9_ATTRIB 0x28e70
+#define CB_COLOR10_ATTRIB 0x28e8c
+#define CB_COLOR11_ATTRIB 0x28ea8
+
+#define CB_COLOR1_DIM 0x28cb4
+#define CB_COLOR2_DIM 0x28cf0
+#define CB_COLOR3_DIM 0x28d2c
+#define CB_COLOR4_DIM 0x28d68
+#define CB_COLOR5_DIM 0x28da4
+#define CB_COLOR6_DIM 0x28de0
+#define CB_COLOR7_DIM 0x28e1c
+#define CB_COLOR8_DIM 0x28e58
+#define CB_COLOR9_DIM 0x28e74
+#define CB_COLOR10_DIM 0x28e90
+#define CB_COLOR11_DIM 0x28eac
+
+#define CB_COLOR1_CMASK 0x28cb8
+#define CB_COLOR2_CMASK 0x28cf4
+#define CB_COLOR3_CMASK 0x28d30
+#define CB_COLOR4_CMASK 0x28d6c
+#define CB_COLOR5_CMASK 0x28da8
+#define CB_COLOR6_CMASK 0x28de4
+#define CB_COLOR7_CMASK 0x28e20
+
+#define CB_COLOR1_CMASK_SLICE 0x28cbc
+#define CB_COLOR2_CMASK_SLICE 0x28cf8
+#define CB_COLOR3_CMASK_SLICE 0x28d34
+#define CB_COLOR4_CMASK_SLICE 0x28d70
+#define CB_COLOR5_CMASK_SLICE 0x28dac
+#define CB_COLOR6_CMASK_SLICE 0x28de8
+#define CB_COLOR7_CMASK_SLICE 0x28e24
+
+#define CB_COLOR1_FMASK 0x28cc0
+#define CB_COLOR2_FMASK 0x28cfc
+#define CB_COLOR3_FMASK 0x28d38
+#define CB_COLOR4_FMASK 0x28d74
+#define CB_COLOR5_FMASK 0x28db0
+#define CB_COLOR6_FMASK 0x28dec
+#define CB_COLOR7_FMASK 0x28e28
+
+#define CB_COLOR1_FMASK_SLICE 0x28cc4
+#define CB_COLOR2_FMASK_SLICE 0x28d00
+#define CB_COLOR3_FMASK_SLICE 0x28d3c
+#define CB_COLOR4_FMASK_SLICE 0x28d78
+#define CB_COLOR5_FMASK_SLICE 0x28db4
+#define CB_COLOR6_FMASK_SLICE 0x28df0
+#define CB_COLOR7_FMASK_SLICE 0x28e2c
+
+#define CB_COLOR1_CLEAR_WORD0 0x28cc8
+#define CB_COLOR2_CLEAR_WORD0 0x28d04
+#define CB_COLOR3_CLEAR_WORD0 0x28d40
+#define CB_COLOR4_CLEAR_WORD0 0x28d7c
+#define CB_COLOR5_CLEAR_WORD0 0x28db8
+#define CB_COLOR6_CLEAR_WORD0 0x28df4
+#define CB_COLOR7_CLEAR_WORD0 0x28e30
+
+#define CB_COLOR1_CLEAR_WORD1 0x28ccc
+#define CB_COLOR2_CLEAR_WORD1 0x28d08
+#define CB_COLOR3_CLEAR_WORD1 0x28d44
+#define CB_COLOR4_CLEAR_WORD1 0x28d80
+#define CB_COLOR5_CLEAR_WORD1 0x28dbc
+#define CB_COLOR6_CLEAR_WORD1 0x28df8
+#define CB_COLOR7_CLEAR_WORD1 0x28e34
+
+#define CB_COLOR1_CLEAR_WORD2 0x28cd0
+#define CB_COLOR2_CLEAR_WORD2 0x28d0c
+#define CB_COLOR3_CLEAR_WORD2 0x28d48
+#define CB_COLOR4_CLEAR_WORD2 0x28d84
+#define CB_COLOR5_CLEAR_WORD2 0x28dc0
+#define CB_COLOR6_CLEAR_WORD2 0x28dfc
+#define CB_COLOR7_CLEAR_WORD2 0x28e38
+
+#define CB_COLOR1_CLEAR_WORD3 0x28cd4
+#define CB_COLOR2_CLEAR_WORD3 0x28d10
+#define CB_COLOR3_CLEAR_WORD3 0x28d4c
+#define CB_COLOR4_CLEAR_WORD3 0x28d88
+#define CB_COLOR5_CLEAR_WORD3 0x28dc4
+#define CB_COLOR6_CLEAR_WORD3 0x28e00
+#define CB_COLOR7_CLEAR_WORD3 0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0 0x30000
+# define TEX_DIM(x) ((x) << 0)
+# define SQ_TEX_DIM_1D 0
+# define SQ_TEX_DIM_2D 1
+# define SQ_TEX_DIM_3D 2
+# define SQ_TEX_DIM_CUBEMAP 3
+# define SQ_TEX_DIM_1D_ARRAY 4
+# define SQ_TEX_DIM_2D_ARRAY 5
+# define SQ_TEX_DIM_2D_MSAA 6
+# define SQ_TEX_DIM_2D_ARRAY_MSAA 7
+#define SQ_TEX_RESOURCE_WORD1_0 0x30004
+# define TEX_ARRAY_MODE(x) ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0 0x30008
+#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0 0x30010
+# define TEX_DST_SEL_X(x) ((x) << 16)
+# define TEX_DST_SEL_Y(x) ((x) << 19)
+# define TEX_DST_SEL_Z(x) ((x) << 22)
+# define TEX_DST_SEL_W(x) ((x) << 25)
+# define SQ_SEL_X 0
+# define SQ_SEL_Y 1
+# define SQ_SEL_Z 2
+# define SQ_SEL_W 3
+# define SQ_SEL_0 4
+# define SQ_SEL_1 5
+#define SQ_TEX_RESOURCE_WORD5_0 0x30014
+#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
+#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
+#define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000
+#define S_030000_DIM(x) (((x) & 0x7) << 0)
+#define G_030000_DIM(x) (((x) >> 0) & 0x7)
+#define C_030000_DIM 0xFFFFFFF8
+#define V_030000_SQ_TEX_DIM_1D 0x00000000
+#define V_030000_SQ_TEX_DIM_2D 0x00000001
+#define V_030000_SQ_TEX_DIM_3D 0x00000002
+#define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5)
+#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1)
+#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF
+#define S_030000_PITCH(x) (((x) & 0xFFF) << 6)
+#define G_030000_PITCH(x) (((x) >> 6) & 0xFFF)
+#define C_030000_PITCH 0xFFFC003F
+#define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18)
+#define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF)
+#define C_030000_TEX_WIDTH 0x0003FFFF
+#define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004
+#define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0)
+#define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF)
+#define C_030004_TEX_HEIGHT 0xFFFFC000
+#define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14)
+#define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF)
+#define C_030004_TEX_DEPTH 0xF8003FFF
+#define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28)
+#define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF)
+#define C_030004_ARRAY_MODE 0x0FFFFFFF
+#define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008
+#define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_030008_BASE_ADDRESS 0x00000000
+#define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C
+#define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_03000C_MIP_ADDRESS 0x00000000
+#define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010
+#define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_030010_FORMAT_COMP_X 0xFFFFFFFC
+#define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000
+#define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001
+#define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002
+#define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_030010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_030010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_030010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define V_030010_SQ_NUM_FORMAT_NORM 0x00000000
+#define V_030010_SQ_NUM_FORMAT_INT 0x00000001
+#define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002
+#define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_030010_SRF_MODE_ALL 0xFFFFFBFF
+#define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000
+#define V_030010_SRF_MODE_NO_ZERO 0x00000001
+#define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_030010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_030010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_030010_DST_SEL_X 0xFFF8FFFF
+#define V_030010_SQ_SEL_X 0x00000000
+#define V_030010_SQ_SEL_Y 0x00000001
+#define V_030010_SQ_SEL_Z 0x00000002
+#define V_030010_SQ_SEL_W 0x00000003
+#define V_030010_SQ_SEL_0 0x00000004
+#define V_030010_SQ_SEL_1 0x00000005
+#define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_030010_DST_SEL_Y 0xFFC7FFFF
+#define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_030010_DST_SEL_Z 0xFE3FFFFF
+#define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_030010_DST_SEL_W 0xF1FFFFFF
+#define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_030010_BASE_LEVEL 0x0FFFFFFF
+#define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014
+#define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_030014_LAST_LEVEL 0xFFFFFFF0
+#define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_030014_BASE_ARRAY 0xFFFE000F
+#define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_030014_LAST_ARRAY 0xC001FFFF
+#define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018
+#define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0)
+#define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7)
+#define C_030018_MAX_ANISO 0xFFFFFFF8
+#define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3)
+#define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7)
+#define C_030018_PERF_MODULATION 0xFFFFFFC7
+#define S_030018_INTERLACED(x) (((x) & 0x1) << 6)
+#define G_030018_INTERLACED(x) (((x) >> 6) & 0x1)
+#define C_030018_INTERLACED 0xFFFFFFBF
+#define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29)
+#define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7)
+#define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C
+#define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
+#define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3)
+#define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8)
+#define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3)
+#define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+#define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3)
+#define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16)
+#define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3)
+#define S_03001C_TYPE(x) (((x) & 0x3) << 30)
+#define G_03001C_TYPE(x) (((x) >> 30) & 0x3)
+#define C_03001C_TYPE 0x3FFFFFFF
+#define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000
+#define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001
+#define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002
+#define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003
+#define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0)
+#define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F)
+#define C_03001C_DATA_FORMAT 0xFFFFFFC0
+
+#define SQ_VTX_CONSTANT_WORD0_0 0x30000
+#define SQ_VTX_CONSTANT_WORD1_0 0x30004
+#define SQ_VTX_CONSTANT_WORD2_0 0x30008
+# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
+# define SQ_VTXC_STRIDE(x) ((x) << 8)
+# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
+# define SQ_ENDIAN_NONE 0
+# define SQ_ENDIAN_8IN16 1
+# define SQ_ENDIAN_8IN32 2
+#define SQ_VTX_CONSTANT_WORD3_0 0x3000C
+# define SQ_VTCX_SEL_X(x) ((x) << 3)
+# define SQ_VTCX_SEL_Y(x) ((x) << 6)
+# define SQ_VTCX_SEL_Z(x) ((x) << 9)
+# define SQ_VTCX_SEL_W(x) ((x) << 12)
+#define SQ_VTX_CONSTANT_WORD4_0 0x30010
+#define SQ_VTX_CONSTANT_WORD5_0 0x30014
+#define SQ_VTX_CONSTANT_WORD6_0 0x30018
+#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
+
+#define TD_PS_BORDER_COLOR_INDEX 0xA400
+#define TD_PS_BORDER_COLOR_RED 0xA404
+#define TD_PS_BORDER_COLOR_GREEN 0xA408
+#define TD_PS_BORDER_COLOR_BLUE 0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA 0xA410
+#define TD_VS_BORDER_COLOR_INDEX 0xA414
+#define TD_VS_BORDER_COLOR_RED 0xA418
+#define TD_VS_BORDER_COLOR_GREEN 0xA41C
+#define TD_VS_BORDER_COLOR_BLUE 0xA420
+#define TD_VS_BORDER_COLOR_ALPHA 0xA424
+#define TD_GS_BORDER_COLOR_INDEX 0xA428
+#define TD_GS_BORDER_COLOR_RED 0xA42C
+#define TD_GS_BORDER_COLOR_GREEN 0xA430
+#define TD_GS_BORDER_COLOR_BLUE 0xA434
+#define TD_GS_BORDER_COLOR_ALPHA 0xA438
+#define TD_HS_BORDER_COLOR_INDEX 0xA43C
+#define TD_HS_BORDER_COLOR_RED 0xA440
+#define TD_HS_BORDER_COLOR_GREEN 0xA444
+#define TD_HS_BORDER_COLOR_BLUE 0xA448
+#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
+#define TD_LS_BORDER_COLOR_INDEX 0xA450
+#define TD_LS_BORDER_COLOR_RED 0xA454
+#define TD_LS_BORDER_COLOR_GREEN 0xA458
+#define TD_LS_BORDER_COLOR_BLUE 0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA 0xA460
+#define TD_CS_BORDER_COLOR_INDEX 0xA464
+#define TD_CS_BORDER_COLOR_RED 0xA468
+#define TD_CS_BORDER_COLOR_GREEN 0xA46C
+#define TD_CS_BORDER_COLOR_BLUE 0xA470
+#define TD_CS_BORDER_COLOR_ALPHA 0xA474
+
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
+#define CAYMAN_DB_EQAA 0x28804
+#define CAYMAN_DB_DEPTH_INFO 0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
+#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
+#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
+/* cayman packet3 addition */
+#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
+
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
new file mode 100644
index 0000000..5a82b6b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -0,0 +1,725 @@
+/* Utility to create the register check tables.
+ * This includes an inlined list.h that is safe for userspace.
+ *
+ * Copyright 2009 Jerome Glisse
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Authors:
+ * Jerome Glisse
+ * Dave Airlie
+ */
+
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <regex.h>
+#include <libgen.h>
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member)*__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
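+
+/* Editor's note: an illustrative usage sketch, not part of the original
+ * file. Given a hypothetical struct that embeds a list_head (defined
+ * just below), container_of() recovers the enclosing object from a
+ * pointer to the member:
+ *
+ *	struct reg { struct list_head node; unsigned int offset; };
+ *
+ *	struct list_head *p = ...;	// known to point at some reg's .node
+ *	struct reg *r = container_of(p, struct reg, node);
+ */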
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev, struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+#else
+extern void __list_add(struct list_head *new,
+ struct list_head *prev, struct list_head *next);
+#endif
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
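+
+/* Editor's note: an illustrative sketch, not part of the original file,
+ * using the hypothetical struct reg nodes a and b from the
+ * container_of() note. list_add() pushes at the head (stack/LIFO) while
+ * list_add_tail() appends at the tail (queue/FIFO):
+ *
+ *	LIST_HEAD(q);
+ *	list_add_tail(&a.node, &q);	// q: a
+ *	list_add_tail(&b.node, &q);	// q: a, b  (FIFO order)
+ *	// list_add() instead would have yielded: b, a  (LIFO order)
+ */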
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = (void *)0xDEADBEEF;
+ entry->prev = (void *)0xBEEFDEAD;
+}
+#else
+extern void list_del(struct list_head *entry);
+#endif
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old, struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is empty and not being modified
+ * @head: the list to test
+ *
+ * Description:
+ * tests whether a list is empty _and_ checks that no other CPU might be
+ * in the process of modifying either member (next or prev)
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). E.g. it cannot be used
+ * if another CPU could re-list_add() it.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ struct list_head *new_first = entry->next;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head; it may be the head itself,
+ * in which case the list is not cut
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list. You should
+ * pass on @entry an element you know is on @head. @list
+ * should be an empty list or a list you do not care about
+ * losing its data.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (list_empty(head))
+ return;
+ if (list_is_singular(head) && (head->next != entry && head != entry))
+ return;
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
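+
+/* Editor's note: an illustrative sketch, not part of the original file.
+ * Given a list q holding the hypothetical nodes a, b, c, cutting at b
+ * moves the initial run onto @list:
+ *
+ *	LIST_HEAD(front);
+ *	list_cut_position(&front, &q, &b.node);
+ *	// front: a, b      q: c
+ */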
+
+static inline void __list_splice(const struct list_head *list,
+ struct list_head *prev, struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * list_splice - join two lists, this is designed for stacks
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(const struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head, head->next);
+}
+
+/**
+ * list_splice_tail - join two lists, each list being a queue
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice_tail(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head->prev, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head, head->next);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_splice_tail_init - join two lists and reinitialise the emptied list
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head->prev, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; prefetch(pos->next), pos != (head); \
+ pos = pos->next)
+
+/**
+ * __list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * This variant differs from list_for_each() in that it is the
+ * simplest possible list iteration code; no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
+ pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_prev_safe(pos, n, head) \
+ for (pos = (head)->prev, n = pos->prev; \
+ prefetch(pos->prev), pos != (head); \
+ pos = n, n = pos->prev)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
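+
+/*
+ * Example (as used later in this file): walking every offset recorded
+ * in a table:
+ *
+ *     struct offset *offset;
+ *     list_for_each_entry(offset, &t->offsets, list)
+ *             printf("0x%08x\n", offset->offset);
+ */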
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
+ prefetch(pos->member.prev), &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_struct within the struct.
+ *
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
+ */
+#define list_prepare_entry(pos, head, member) \
+ ((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
+ prefetch(pos->member.prev), &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_from - iterate over list of given type from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from(pos, head, member) \
+ for (; prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_continue - continue list iteration safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_entry(pos->member.next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ n = list_entry(pos->member.prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+
+struct offset {
+ struct list_head list;
+ unsigned offset;
+};
+
+struct table {
+ struct list_head offsets;
+ unsigned offset_max;
+ unsigned nentry;
+ unsigned *table;
+ char *gpu_prefix;
+};
+
+static struct offset *offset_new(unsigned o)
+{
+ struct offset *offset;
+
+ offset = (struct offset *)malloc(sizeof(struct offset));
+ if (offset) {
+ INIT_LIST_HEAD(&offset->list);
+ offset->offset = o;
+ }
+ return offset;
+}
+
+static void table_offset_add(struct table *t, struct offset *offset)
+{
+ list_add_tail(&offset->list, &t->offsets);
+}
+
+static void table_init(struct table *t)
+{
+ INIT_LIST_HEAD(&t->offsets);
+ t->offset_max = 0;
+ t->nentry = 0;
+ t->table = NULL;
+}
+
+static void table_print(struct table *t)
+{
+ unsigned nlloop, i, j, n, c, id;
+
+ nlloop = (t->nentry + 3) / 4;
+ c = t->nentry;
+ printf("static const unsigned %s_reg_safe_bm[%d] = {\n", t->gpu_prefix,
+ t->nentry);
+ for (i = 0, id = 0; i < nlloop; i++) {
+ n = 4;
+ if (n > c)
+ n = c;
+ c -= n;
+ for (j = 0; j < n; j++) {
+ if (j == 0)
+ printf("\t");
+ else
+ printf(" ");
+ printf("0x%08X,", t->table[id++]);
+ }
+ printf("\n");
+ }
+ printf("};\n");
+}
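+
+/*
+ * The generated output looks like this (prefix and values illustrative):
+ *
+ *     static const unsigned cayman_reg_safe_bm[N] = {
+ *             0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ *             ...
+ *     };
+ */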
+
+static int table_build(struct table *t)
+{
+ struct offset *offset;
+ unsigned i, m;
+
+ t->nentry = ((t->offset_max >> 2) + 31) / 32;
+ t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
+ if (t->table == NULL)
+ return -1;
+ memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
+ list_for_each_entry(offset, &t->offsets, list) {
+ i = (offset->offset >> 2) / 32;
+ m = (offset->offset >> 2) & 31;
+ m = 1 << m;
+ t->table[i] ^= m;
+ }
+ return 0;
+}
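+
+/*
+ * Worked example (illustrative): register offset 0x1234 is dword 0x48d
+ * (0x1234 >> 2); that is bit 0x48d & 31 = 13 of bitmap word
+ * 0x48d / 32 = 36.  The table starts out all ones, so toggling the bit
+ * marks the register as safe for the command stream checker.
+ */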
+
+static char gpu_name[10];
+static int parser_auth(struct table *t, const char *filename)
+{
+ FILE *file;
+ regex_t mask_rex;
+ regmatch_t match[4];
+ char buf[1024];
+ size_t end;
+ int len;
+ int done = 0;
+ int r;
+ unsigned o;
+ struct offset *offset;
+ char last_reg_s[10];
+ int last_reg;
+
+ if (regcomp
+ (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+ fprintf(stderr, "Failed to compile regular expression\n");
+ return -1;
+ }
+ file = fopen(filename, "r");
+ if (file == NULL) {
+ fprintf(stderr, "Failed to open: %s\n", filename);
+ return -1;
+ }
+ fseek(file, 0, SEEK_END);
+ end = ftell(file);
+ fseek(file, 0, SEEK_SET);
+
+ /* get header */
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
+ return -1;
+ }
+
+ /* the first line contains the gpu name followed
+ * by the last register */
+ sscanf(buf, "%s %s", gpu_name, last_reg_s);
+ t->gpu_prefix = gpu_name;
+ last_reg = strtol(last_reg_s, NULL, 16);
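+ /* e.g. a header line of "cayman 0x9400" (values illustrative) sets
+ * the table prefix to "cayman" and caps the bitmap at register
+ * 0x9400 */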
+
+ do {
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
+ return -1;
+ }
+ len = strlen(buf);
+ if (ftell(file) == end)
+ done = 1;
+ if (len) {
+ r = regexec(&mask_rex, buf, 4, match, 0);
+ if (r == REG_NOMATCH) {
+ } else if (r) {
+ fprintf(stderr,
+ "Error matching regular expression %d in %s\n",
+ r, filename);
+ fclose(file);
+ return -1;
+ } else {
+ buf[match[0].rm_eo] = 0;
+ buf[match[1].rm_eo] = 0;
+ buf[match[2].rm_eo] = 0;
+ o = strtol(&buf[match[1].rm_so], NULL, 16);
+ offset = offset_new(o);
+ table_offset_add(t, offset);
+ if (o > t->offset_max)
+ t->offset_max = o;
+ }
+ }
+ } while (!done);
+ fclose(file);
+ if (t->offset_max < last_reg)
+ t->offset_max = last_reg;
+ return table_build(t);
+}
+
+int main(int argc, char *argv[])
+{
+ struct table t;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <authfile>\n", argv[0]);
+ exit(1);
+ }
+ table_init(&t);
+ if (parser_auth(&t, argv[1])) {
+ fprintf(stderr, "Failed to parse file %s\n", argv[1]);
+ return -1;
+ }
+ table_print(&t);
+ return 0;
+}
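+
+/*
+ * Typical invocation (illustrative): the radeon Makefile runs something
+ * like
+ *     mkregtable reg_srcs/cayman > cayman_reg_safe.h
+ * and the generated bitmap header is included by the CS checker.
+ */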
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 0000000..3bf43a1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,2479 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void si_rlc_fini(struct radeon_device *rdev);
+extern int si_rlc_init(struct radeon_device *rdev);
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
+#define ARUBA_RLC_UCODE_SIZE 1536
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
+MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
+MODULE_FIRMWARE("radeon/ARUBA_me.bin");
+MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+
+
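+/*
+ * The golden register tables below are {reg, and_mask, or_value}
+ * triples consumed by radeon_program_register_sequence(): an and_mask
+ * of 0xffffffff writes or_value directly, any other mask does a
+ * read-modify-write of the register.
+ */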
+static const u32 cayman_golden_registers2[] =
+{
+ 0x3e5c, 0xffffffff, 0x00000000,
+ 0x3e48, 0xffffffff, 0x00000000,
+ 0x3e4c, 0xffffffff, 0x00000000,
+ 0x3e64, 0xffffffff, 0x00000000,
+ 0x3e50, 0xffffffff, 0x00000000,
+ 0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00011003,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76541032,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x42010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000010f, 0x01000100,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88d0, 0xffffffff, 0x0f40df40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+ 0x8f8, 0xffffffff, 0,
+ 0x8fc, 0x00380000, 0,
+ 0x8f8, 0xffffffff, 1,
+ 0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x90e8, 0x001fffff, 0x010400c0,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c30, 0x0000000f, 0x00040005,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9688, 0x00300000, 0x0017000f,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers,
+ (const u32)ARRAY_SIZE(cayman_golden_registers));
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers2,
+ (const u32)ARRAY_SIZE(cayman_golden_registers2));
+ break;
+ case CHIP_ARUBA:
+ if ((rdev->pdev->device == 0x9900) ||
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9903) ||
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x990B) ||
+ (rdev->pdev->device == 0x990C) ||
+ (rdev->pdev->device == 0x990D) ||
+ (rdev->pdev->device == 0x990E) ||
+ (rdev->pdev->device == 0x990F) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9917) ||
+ (rdev->pdev->device == 0x9918)) {
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers,
+ (const u32)ARRAY_SIZE(dvst_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ } else {
+ radeon_program_register_sequence(rdev,
+ scrapper_golden_registers,
+ (const u32)ARRAY_SIZE(scrapper_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00916a00}
+};
+
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00976b00}
+};
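+
+/*
+ * Each row in the io_mc_regs tables above is an {index, data} pair:
+ * ni_mc_load_microcode() below writes the first word to
+ * MC_SEQ_IO_DEBUG_INDEX and the second to MC_SEQ_IO_DEBUG_DATA.
+ */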
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ u32 mem_type, running, blackout = 0;
+ u32 *io_mc_regs;
+ int i, ucode_size, regs_size;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ io_mc_regs = (u32 *)&barts_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_TURKS:
+ io_mc_regs = (u32 *)&turks_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_CAICOS:
+ default:
+ io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_CAYMAN:
+ io_mc_regs = (u32 *)&cayman_io_mc_regs;
+ ucode_size = CAYMAN_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ }
+
+ mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+ if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
+ if (running) {
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+ }
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < regs_size; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
+ /* load the MC ucode */
+ fw_data = (const __be32 *)rdev->mc_fw->data;
+ for (i = 0; i < ucode_size; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+ break;
+ udelay(1);
+ }
+
+ if (running)
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+ }
+
+ return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ chip_name = "BARTS";
+ rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_TURKS:
+ chip_name = "TURKS";
+ rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_CAICOS:
+ chip_name = "CAICOS";
+ rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_CAYMAN:
+ chip_name = "CAYMAN";
+ rlc_chip_name = "CAYMAN";
+ pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+ mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_ARUBA:
+ chip_name = "ARUBA";
+ rlc_chip_name = "ARUBA";
+ /* pfp/me same size as CAYMAN */
+ pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
+ mc_req_size = 0;
+ break;
+ default: BUG();
+ }
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ /* no MC ucode on TN */
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mc_fw->size != mc_req_size) {
+ printk(KERN_ERR
+ "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ }
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "ni_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ release_firmware(rdev->mc_fw);
+ rdev->mc_fw = NULL;
+ }
+ return err;
+}
+
+/*
+ * Core functions
+ */
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 cgts_tcc_disable;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 cgts_sm_ctrl_reg;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+ u32 disabled_rb_mask;
+ int i, j;
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ rdev->config.cayman.max_shader_engines = 2;
+ rdev->config.cayman.max_pipes_per_simd = 4;
+ rdev->config.cayman.max_tile_pipes = 8;
+ rdev->config.cayman.max_simds_per_se = 12;
+ rdev->config.cayman.max_backends_per_se = 4;
+ rdev->config.cayman.max_texture_channel_caches = 8;
+ rdev->config.cayman.max_gprs = 256;
+ rdev->config.cayman.max_threads = 256;
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x100;
+ rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_ARUBA:
+ default:
+ rdev->config.cayman.max_shader_engines = 1;
+ rdev->config.cayman.max_pipes_per_simd = 4;
+ rdev->config.cayman.max_tile_pipes = 2;
+ if ((rdev->pdev->device == 0x9900) ||
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x990B) ||
+ (rdev->pdev->device == 0x990C) ||
+ (rdev->pdev->device == 0x990F) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9917) ||
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
+ rdev->config.cayman.max_simds_per_se = 6;
+ rdev->config.cayman.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x9903) ||
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x990D) ||
+ (rdev->pdev->device == 0x990E) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
+ rdev->config.cayman.max_simds_per_se = 4;
+ rdev->config.cayman.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x9919) ||
+ (rdev->pdev->device == 0x9990) ||
+ (rdev->pdev->device == 0x9991) ||
+ (rdev->pdev->device == 0x9994) ||
+ (rdev->pdev->device == 0x9995) ||
+ (rdev->pdev->device == 0x9996) ||
+ (rdev->pdev->device == 0x999A) ||
+ (rdev->pdev->device == 0x99A0)) {
+ rdev->config.cayman.max_simds_per_se = 3;
+ rdev->config.cayman.max_backends_per_se = 1;
+ } else {
+ rdev->config.cayman.max_simds_per_se = 2;
+ rdev->config.cayman.max_backends_per_se = 1;
+ }
+ rdev->config.cayman.max_texture_channel_caches = 2;
+ rdev->config.cayman.max_gprs = 256;
+ rdev->config.cayman.max_threads = 256;
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x40;
+ rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ evergreen_fix_pci_max_read_req_size(rdev);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (rdev->config.cayman.mem_row_size_in_kb > 4)
+ rdev->config.cayman.mem_row_size_in_kb = 4;
+ /* XXX use MC settings? */
+ rdev->config.cayman.shader_engine_tile_size = 32;
+ rdev->config.cayman.num_gpus = 1;
+ rdev->config.cayman.multi_gpu_tile_size = 64;
+
+ tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+ rdev->config.cayman.num_tile_pipes = (1 << tmp);
+ tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+ rdev->config.cayman.num_shader_engines = tmp + 1;
+ tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+ rdev->config.cayman.num_gpus = tmp + 1;
+ tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+ rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+ tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.cayman.tile_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.cayman.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.cayman.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.cayman.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else {
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
+ rdev->config.cayman.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.cayman.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.cayman.tile_config |= 2 << 4;
+ break;
+ }
+ }
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
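+ /* Example (illustrative): a Cayman with 8 tile pipes and 8 banks
+ * encodes 0x3 in bits 3:0 and 0x1 in bits 7:4; bits 11:8 and 15:12
+ * carry the raw PIPE_INTERLEAVE_SIZE and ROW_SIZE fields taken from
+ * gb_addr_config above. */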
+
+ tmp = 0;
+ for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
+ }
+ /* enabled rbs are just the ones not disabled :) */
+ disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+ if ((rdev->config.cayman.max_backends_per_se == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp,
+ rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines,
+ CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ }
+ WREG32(GB_BACKEND_MAP, tmp);
+
+ cgts_tcc_disable = 0xffff0000;
+ for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+ cgts_tcc_disable &= ~(1 << (16 + i));
+ WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+ /* reprogram the shader complex */
+ cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+ for (i = 0; i < 16; i++)
+ WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+ WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+ /* need to be explicitly zero-ed */
+ WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+ WREG32(SQ_LSTMP_RING_BASE, 0);
+ WREG32(SQ_HSTMP_RING_BASE, 0);
+ WREG32(SQ_ESTMP_RING_BASE, 0);
+ WREG32(SQ_GSTMP_RING_BASE, 0);
+ WREG32(SQ_VSTMP_RING_BASE, 0);
+ WREG32(SQ_PSTMP_RING_BASE, 0);
+
+ WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+ WREG32(SQ_CONFIG, (VC_ENABLE |
+ EXPORT_SRC_C |
+ GFX_PRIO(0) |
+ CS1_PRIO(0) |
+ CS2_PRIO(1)));
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ tmp = RREG32(HDP_MISC_CNTL);
+ tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+ WREG32(HDP_MISC_CNTL, tmp);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+ int i, r;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+ WREG32(0x15D4, 0);
+ WREG32(0x15D8, 0);
+ WREG32(0x15DC, 0);
+
+ /* empty context1-7 */
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application and set up and assigned
+ * on the fly in the vm part of radeon_gart.c
+ */
+ for (i = 1; i < 8; i++) {
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-7 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL2, 4);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+ cayman_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+ cayman_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl)
+{
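+ /* the low two bits of SRBM_GFX_CNTL select which of the three CP
+ * rings the banked CP_INT_CNTL register below is routed to */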
+ u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+ WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
+/*
+ * CP.
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+
+ if (ring->rptr_save_reg) {
+ uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
+
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
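+ /* the 8-byte-aligned semaphore address is split into two 20-bit
+ * halves: bits 22:3 go to UVD_SEMA_ADDR_LOW, bits 42:23 to
+ * UVD_SEMA_ADDR_HIGH */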
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
+
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ WREG32(CP_ME_CNTL, 0);
+ else {
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ }
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ cayman_cp_enable(rdev, false);
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r, i;
+
+ r = radeon_ring_lock(rdev, ring, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ cayman_cp_enable(rdev, true);
+
+ r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < cayman_default_size; i++)
+ radeon_ring_write(ring, cayman_default_state[i]);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ /* XXX init other rings */
+
+ return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ cayman_cp_enable(rdev, false);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int cayman_cp_resume(struct radeon_device *rdev)
+{
+ static const int ridx[] = {
+ RADEON_RING_TYPE_GFX_INDEX,
+ CAYMAN_RING_TYPE_CP1_INDEX,
+ CAYMAN_RING_TYPE_CP2_INDEX
+ };
+ static const unsigned cp_rb_cntl[] = {
+ CP_RB0_CNTL,
+ CP_RB1_CNTL,
+ CP_RB2_CNTL,
+ };
+ static const unsigned cp_rb_rptr_addr[] = {
+ CP_RB0_RPTR_ADDR,
+ CP_RB1_RPTR_ADDR,
+ CP_RB2_RPTR_ADDR
+ };
+ static const unsigned cp_rb_rptr_addr_hi[] = {
+ CP_RB0_RPTR_ADDR_HI,
+ CP_RB1_RPTR_ADDR_HI,
+ CP_RB2_RPTR_ADDR_HI
+ };
+ static const unsigned cp_rb_base[] = {
+ CP_RB0_BASE,
+ CP_RB1_BASE,
+ CP_RB2_BASE
+ };
+ struct radeon_ring *ring;
+ int i, r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, (1 << 27));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+ WREG32(SCRATCH_UMSK, 0xff);
+
+ for (i = 0; i < 3; ++i) {
+ uint32_t rb_cntl;
+ uint64_t addr;
+
+ /* Set ring buffer size */
+ ring = &rdev->ring[ridx[i]];
+ rb_cntl = drm_order(ring->ring_size / 8);
+ rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= BUF_SWAP_32BIT;
+#endif
+ WREG32(cp_rb_cntl[i], rb_cntl);
+
+ /* set the wb address whether it's enabled or not */
+ addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+ WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+ WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+ }
+
+ /* set the rb base addr; this causes an internal reset of ALL rings */
+ for (i = 0; i < 3; ++i) {
+ ring = &rdev->ring[ridx[i]];
+ WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+ }
+
+ for (i = 0; i < 3; ++i) {
+ /* Initialize the ring buffer's read and write pointers */
+ ring = &rdev->ring[ridx[i]];
+ WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
+
+ ring->rptr = ring->wptr = 0;
+ WREG32(ring->rptr_reg, ring->rptr);
+ WREG32(ring->wptr_reg, ring->wptr);
+
+ mdelay(1);
+ WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+ }
+
+ /* start the rings */
+ cayman_cp_start(rdev);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ /* this only tests cp0 */
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ if (r) {
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
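+/* DMA packet headers are built with DMA_PACKET() from nid.h:
+ * bits 31:28 hold the command, bit 23 the t flag, bit 22 the s flag,
+ * and bits 19:0 the count. For example, the rptr writeback header
+ * used below, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1), encodes to
+ * (0x2 << 28) | 1 = 0x20000001.
+ */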
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
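+ /* Worked example of the next_rptr math above, assuming wptr == 16:
+ * the 4-dword WRITE packet occupies dwords 16-19, one NOP pad brings
+ * the ring to 21 (21 & 7 == 5), and the 3-dword INDIRECT_BUFFER
+ * packet ends at dword 23, so execution resumes at 24, an 8 DW
+ * boundary; hence next_rptr == 24.
+ */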
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
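+/* Worked example of the DMA_RB_CNTL value computed above, assuming
+ * the 64 KB DMA rings set up in cayman_init():
+ *   rb_bufsz = drm_order(65536 / 4) = 14 (log2 of the size in dwords)
+ *   rb_cntl  = 14 << 1 = 0x1c, or 0x101c once the writeback enable
+ *   bit (DMA_RPTR_WRITEBACK_ENABLE, 1 << 12) is ORed in.
+ */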
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
+ return reset_mask;
+}
+
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ evergreen_mc_resume(rdev, &save);
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ cayman_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
+ evergreen_mc_program(rdev);
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ cayman_gpu_init(rdev);
+
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+ /* allocate rlc buffers */
+ if (rdev->flags & RADEON_IS_IGP) {
+ r = si_rlc_init(rdev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ r = cayman_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = cayman_cp_resume(rdev);
+ if (r)
+ return r;
+
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770 and newer hw,
+ * unlike on r500 hw, posting will perform the necessary tasks to
+ * bring the GPU back into good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
+
+ rdev->accel_working = true;
+ r = cayman_startup(rdev);
+ if (r) {
+ DRM_ERROR("cayman startup failed on resume\n");
+ rdev->accel_working = false;
+ return r;
+ }
+ return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ cayman_cp_enable(rdev, false);
+ cayman_dma_stop(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_suspend(rdev);
+ evergreen_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ cayman_pcie_gart_disable(rdev);
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call asic-specific functions. This should also allow us
+ * to remove a bunch of callbacks like vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = cayman_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ if (rdev->flags & RADEON_IS_IGP)
+ si_rlc_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+
+ /* Don't start up if the MC ucode is missing.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ *
+ * We can skip this check for TN, because there is no MC
+ * ucode.
+ */
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+ r600_blit_fini(rdev);
+ cayman_cp_fini(rdev);
+ cayman_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ if (rdev->flags & RADEON_IS_IGP)
+ si_rlc_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+ /* number of VMs */
+ rdev->vm_manager.nvm = 8;
+ /* base offset of vram pages */
+ if (rdev->flags & RADEON_IS_IGP) {
+ u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
+ tmp <<= 22;
+ rdev->vm_manager.vram_base_offset = tmp;
+ } else
+ rdev->vm_manager.vram_base_offset = 0;
+ return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+#define R600_ENTRY_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
+{
+ uint32_t r600_flags = 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ r600_flags |= R600_PTE_SYSTEM;
+ r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return r600_flags;
+}
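+/* Worked example: a valid, readable, writeable, snooped system page,
+ * i.e. RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
+ * RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SYSTEM |
+ * RADEON_VM_PAGE_SNOOPED, maps to 0x1 | 0x20 | 0x40 | 0x2 | 0x4 = 0x67
+ * in the R600_PTE_* encoding above.
+ */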
+
+/**
+ * cayman_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman/TN).
+ */
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ if ((flags & RADEON_VM_PAGE_SYSTEM) ||
+ (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ }
+}
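+/* Worked example for the contiguous-vram path above: with count == 64,
+ * ndw = 128 and the header DMA_PTE_PDE_PACKET(128) encodes to
+ * (2 << 28) | (1 << 26) | (1 << 21) | 128 = 0x24200080; that single
+ * 9-dword packet then programs ndw / 2 = 64 page table entries.
+ */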
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ * @vm: vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ radeon_ring_write(ring, 0x1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+ radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+}
+
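+/**
+ * cayman_dma_vm_flush - vm flush using the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ * @vm: vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using SRBM writes on the DMA ring (cayman-si).
+ */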
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
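+/* Illustration of the SRBM_WRITE encoding used above: the header
+ * DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0) is 0x9 << 28 = 0x90000000,
+ * and the next dword packs (0xf << 16), presumably a byte-enable mask,
+ * with the dword-aligned register offset; for VM_INVALIDATE_REQUEST
+ * (0x1478) that gives 0xf0000 | 0x51e = 0x000f051e.
+ */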
+
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 0000000..5db7b7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL 0x6840
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x68b4
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x68c4
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL 0x68d4
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x68f0
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x6960
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x6964
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x6a80
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 0000000..e226faf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define CAYMAN_MAX_SH_GPRS 256
+#define CAYMAN_MAX_TEMP_GPRS 16
+#define CAYMAN_MAX_SH_THREADS 256
+#define CAYMAN_MAX_SH_STACK_ENTRIES 4096
+#define CAYMAN_MAX_FRC_EOV_CNT 16384
+#define CAYMAN_MAX_BACKENDS 8
+#define CAYMAN_MAX_BACKENDS_MASK 0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS 16
+#define CAYMAN_MAX_SIMDS_MASK 0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES 8
+#define CAYMAN_MAX_PIPES_MASK 0xFF
+#define CAYMAN_MAX_LDS_NUM 0xFFFF
+#define CAYMAN_MAX_TCC 16
+#define CAYMAN_MAX_TCC_MASK 0xFF
+
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
+
+#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
+#define SRBM_GFX_CNTL 0x0E44
+#define RINGID(x) (((x) & 0x3) << 0)
+#define VMID(x) (((x) & 0x7) << 0)
+#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_CNTL2 0x1430
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_INVALIDATE_REQUEST 0x1478
+#define VM_INVALIDATE_RESPONSE 0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_MX_L1_TLB_CNTL 0x2064
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+#define FUS_MC_VM_FB_OFFSET 0x2068
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_SEQ_SUP_CNTL 0x28c8
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_IO_PAD_CNTL_D0 0x29d0
+#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
+#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_ADDR_CONFIG 0x2F48
+#define HDP_MISC_CNTL 0x2F4C
+#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+
+#define RLC_GFX_INDEX 0x3FC4
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
+#define CP_COHER_CNTL2 0x85E8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_RB2_RPTR 0x86f8
+#define CP_RB1_RPTR 0x86fc
+#define CP_RB0_RPTR 0x8700
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+#define CP_PERFMON_CNTL 0x87FC
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0xFFFF0000
+#define INACTIVE_SIMDS_SHIFT 16
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_TF_RING_SIZE 0x8988
+#define VGT_OFFCHIP_LDS_BASE 0x89b4
+
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define GFX_PRIO(x) ((x) << 2)
+#define CS1_PRIO(x) ((x) << 4)
+#define CS2_PRIO(x) ((x) << 6)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_LSTMP_RING_BASE 0x8e10
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define DYN_GPR_ENABLE (1 << 8)
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define CRC_SIMD_ID_WADDR_DISABLE (1 << 8)
+
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_TCC_DISABLE 0x914C
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x9150
+#define OVERRIDE (1 << 21)
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+#define NUM_LOWER_PIPES(x) ((x) << 30)
+#define NUM_LOWER_PIPES_MASK 0x40000000
+#define NUM_LOWER_PIPES_SHIFT 30
+#define GB_BACKEND_MAP 0x98FC
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define CP_RB0_BASE 0xC100
+#define CP_RB0_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB0_RPTR_ADDR 0xC10C
+#define CP_RB0_RPTR_ADDR_HI 0xC110
+#define CP_RB0_WPTR 0xC114
+
+#define CP_INT_CNTL 0xC124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+
+#define CP_RB1_BASE 0xC180
+#define CP_RB1_CNTL 0xC184
+#define CP_RB1_RPTR_ADDR 0xC188
+#define CP_RB1_RPTR_ADDR_HI 0xC18C
+#define CP_RB1_WPTR 0xC190
+#define CP_RB2_BASE 0xC194
+#define CP_RB2_CNTL 0xC198
+#define CP_RB2_RPTR_ADDR 0xC19C
+#define CP_RB2_RPTR_ADDR_HI 0xC1A0
+#define CP_RB2_WPTR 0xC1A4
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_ME_RAM_DATA 0xC160
+#define CP_DEBUG 0xC1FC
+
+#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xEF00
+#define UVD_SEMA_ADDR_HIGH 0xEF04
+#define UVD_SEMA_CMD 0xEF08
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DEALLOC_STATE 0x14
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+#define PACKET3_ME_WRITE 0x7A
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_MODE 0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 0000000..46470dd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,4113 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
+#include "atom.h"
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include "r100_reg_safe.h"
+#include "rn50_reg_safe.h"
+
+/* Firmware Names */
+#define FIRMWARE_R100 "radeon/R100_cp.bin"
+#define FIRMWARE_R200 "radeon/R200_cp.bin"
+#define FIRMWARE_R300 "radeon/R300_cp.bin"
+#define FIRMWARE_R420 "radeon/R420_cp.bin"
+#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
+#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
+#define FIRMWARE_R520 "radeon/R520_cp.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+
+#include "r100_track.h"
+
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * and others in some cases.
+ */
+
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ } else {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 vline1, vline2;
+
+ if (crtc == 0) {
+ vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ } else {
+ vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ }
+ if (vline1 != vline2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
+void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ unsigned i = 0;
+
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (crtc == 0) {
+ if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+ return;
+ } else {
+ if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+ return;
+ }
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+}
+
+/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
+ */
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to flip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
+
+ /* Lock the graphics update lock */
+ /* update the scanout addresses */
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == 0) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+		DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ /* only one clock mode per power state */
+ rdev->pm.requested_clock_mode_index = 0;
+
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
+void r100_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ sclk_cntl = RREG32_PLL(SCLK_CNTL);
+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+ else
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+ } else
+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+ switch (voltage->delay) {
+ case 33:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+ break;
+ case 66:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+ break;
+ case 99:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+ break;
+ case 132:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+ break;
+ }
+ } else
+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ sclk_cntl &= ~FORCE_HDP;
+ else
+ sclk_cntl |= FORCE_HDP;
+
+ WREG32_PLL(SCLK_CNTL, sclk_cntl);
+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->pm.set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
+void r100_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+ return false;
+ else
+ return true;
+}
+
+/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r100_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(RADEON_FP_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP_DETECT_INT_POL;
+ WREG32(RADEON_FP_GEN_CNTL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(RADEON_FP2_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP2_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP2_DETECT_INT_POL;
+ WREG32(RADEON_FP2_GEN_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
+void r100_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned enable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enable |= 1 << radeon_connector->hpd.hpd;
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ }
+ radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned disable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ disable |= 1 << radeon_connector->hpd.hpd;
+ }
+ radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* TODO: can we do something here? */
+	/* The hw seems to cache only one entry, so we should discard it;
+	 * otherwise the first GPU GART read that hits it could end up at
+	 * the wrong address. */
+}
+
+int r100_pci_gart_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.ptr) {
+ WARN(1, "R100 PCI GART already initialized\n");
+ return 0;
+ }
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r)
+ return r;
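+	/* one 32-bit entry per GPU page, hence 4 bytes per page
+	 * (see r100_pci_gart_set_page) */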
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ return radeon_gart_table_ram_alloc(rdev);
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ radeon_gart_restore(rdev);
+ /* discard memory request outside of configured range */
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32(RADEON_AIC_CNTL, tmp);
+ /* set address range for PCI address translate */
+ WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+ WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
+ /* set PCI GART page-table base address */
+ WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+ WREG32(RADEON_AIC_CNTL, tmp);
+ r100_pci_gart_tlb_flush(rdev);
+ DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ /* discard memory request outside of configured range */
+ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+ WREG32(RADEON_AIC_LO_ADDR, 0);
+ WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
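+	/* each entry holds the lower 32 bits of the page's bus address,
+	 * stored little-endian */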
+ gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ return 0;
+}
+
+void r100_pci_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ r100_pci_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ tmp |= RADEON_SW_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
+ tmp |= RADEON_CRTC_VBLANK_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
+ tmp |= RADEON_CRTC2_VBLANK_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ tmp |= RADEON_FP_DETECT_MASK;
+ }
+ if (rdev->irq.hpd[1]) {
+ tmp |= RADEON_FP2_DETECT_MASK;
+ }
+ WREG32(RADEON_GEN_INT_CNTL, tmp);
+ return 0;
+}
+
+void r100_irq_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ tmp = RREG32(R_000044_GEN_INT_STATUS);
+ WREG32(R_000044_GEN_INT_STATUS, tmp);
+}
+
+static uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+ uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+ uint32_t irq_mask = RADEON_SW_INT_TEST |
+ RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+ RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+
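+	/* writing the latched status bits back to GEN_INT_STATUS
+	 * acknowledges (clears) them */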
+ if (irqs) {
+ WREG32(RADEON_GEN_INT_STATUS, irqs);
+ }
+ return irqs & irq_mask;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+ uint32_t status, msi_rearm;
+ bool queue_hotplug = false;
+
+ status = r100_irq_ack(rdev);
+ if (!status) {
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ return IRQ_NONE;
+ }
+ while (status) {
+ /* SW interrupt */
+ if (status & RADEON_SW_INT_TEST) {
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ }
+ /* Vertical blank interrupts */
+ if (status & RADEON_CRTC_VBLANK_STAT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ }
+ if (status & RADEON_CRTC2_VBLANK_STAT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ }
+ if (status & RADEON_FP_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (status & RADEON_FP2_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
+ status = r100_irq_ack(rdev);
+ }
+ if (queue_hotplug)
+ schedule_work(&rdev->hotplug_work);
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS400:
+ case CHIP_RS480:
+ msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+ WREG32(RADEON_AIC_CNTL, msi_rearm);
+ WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+ break;
+ default:
+ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+ break;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0)
+ return RREG32(RADEON_CRTC_CRNT_FRAME);
+ else
+ return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* We have to make sure that caches are flushed before the
+	 * CPU might read something from VRAM. */
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+ /* Wait until IDLE & CLEAN */
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ /* Unused on older asics, since we don't have semaphores or multiple rings */
+ BUG();
+}
+
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ uint32_t cur_pages;
+ uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
+ uint32_t pitch;
+ uint32_t stride_pixels;
+ unsigned ndw;
+ int num_loops;
+ int r = 0;
+
+ /* radeon limited to 16k stride */
+ stride_bytes &= 0x3fff;
+ /* radeon pitch is /64 */
+ pitch = stride_bytes / 64;
+ stride_pixels = stride_bytes / 4;
+ num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
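+	/* illustrative numbers: with the usual 4096-byte GPU page,
+	 * stride_bytes stays 4096 after the 16k mask, pitch = 4096 / 64 = 64,
+	 * stride_pixels = 4096 / 4 = 1024, and copying 20000 pages takes
+	 * DIV_ROUND_UP(20000, 8191) = 3 loops */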
+
+ /* Ask for enough room for blit + flush + fence */
+ ndw = 64 + (10 * num_loops);
+ r = radeon_ring_lock(rdev, ring, ndw);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+ return -EINVAL;
+ }
+ while (num_gpu_pages > 0) {
+ cur_pages = num_gpu_pages;
+ if (cur_pages > 8191) {
+ cur_pages = 8191;
+ }
+ num_gpu_pages -= cur_pages;
+
+		/* pages run in the Y direction (height);
+		 * page width runs in the X direction (width) */
+ radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(ring,
+ RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_SRC_CLIPPING |
+ RADEON_GMC_DST_CLIPPING |
+ RADEON_GMC_BRUSH_NONE |
+ (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+ radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_HOST_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ return r;
+}
+
+static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
+ return 0;
+ }
+ udelay(1);
+ }
+ return -1;
+}
+
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ r = radeon_ring_lock(rdev, ring, 2);
+ if (r) {
+ return;
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
+ RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_unlock_commit(rdev, ring);
+}
+
+
+/* Load the microcode for the CP */
+static int r100_cp_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *fw_name = NULL;
+ int err;
+
+ DRM_DEBUG_KMS("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+ if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+ (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ DRM_INFO("Loading R100 Microcode\n");
+ fw_name = FIRMWARE_R100;
+ } else if ((rdev->family == CHIP_R200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280) ||
+ (rdev->family == CHIP_RS300)) {
+ DRM_INFO("Loading R200 Microcode\n");
+ fw_name = FIRMWARE_R200;
+ } else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350) ||
+ (rdev->family == CHIP_RV350) ||
+ (rdev->family == CHIP_RV380) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ DRM_INFO("Loading R300 Microcode\n");
+ fw_name = FIRMWARE_R300;
+ } else if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ DRM_INFO("Loading R400 Microcode\n");
+ fw_name = FIRMWARE_R420;
+ } else if ((rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740)) {
+ DRM_INFO("Loading RS690/RS740 Microcode\n");
+ fw_name = FIRMWARE_RS690;
+ } else if (rdev->family == CHIP_RS600) {
+ DRM_INFO("Loading RS600 Microcode\n");
+ fw_name = FIRMWARE_RS600;
+ } else if ((rdev->family == CHIP_RV515) ||
+ (rdev->family == CHIP_R520) ||
+ (rdev->family == CHIP_RV530) ||
+ (rdev->family == CHIP_R580) ||
+ (rdev->family == CHIP_RV560) ||
+ (rdev->family == CHIP_RV570)) {
+ DRM_INFO("Loading R500 Microcode\n");
+ fw_name = FIRMWARE_R520;
+ }
+
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else if (rdev->me_fw->size % 8) {
+ printk(KERN_ERR
+ "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ }
+ return err;
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i, size;
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ if (rdev->me_fw) {
+ size = rdev->me_fw->size / 4;
+ fw_data = (const __be32 *)&rdev->me_fw->data[0];
+ WREG32(RADEON_CP_ME_RAM_ADDR, 0);
+ for (i = 0; i < size; i += 2) {
+ WREG32(RADEON_CP_ME_RAM_DATAH,
+ be32_to_cpup(&fw_data[i]));
+ WREG32(RADEON_CP_ME_RAM_DATAL,
+ be32_to_cpup(&fw_data[i + 1]));
+ }
+ }
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ unsigned rb_bufsz;
+ unsigned rb_blksz;
+ unsigned max_fetch;
+ unsigned pre_write_timer;
+ unsigned pre_write_limit;
+ unsigned indirect2_start;
+ unsigned indirect1_start;
+ uint32_t tmp;
+ int r;
+
+ if (r100_debugfs_cp_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for CP !\n");
+ }
+ if (!rdev->me_fw) {
+ r = r100_cp_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 8);
+ ring_size = (1 << (rb_bufsz + 1)) * 4;
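+	/* e.g. a requested 1 MiB ring: drm_order(1048576 / 8) = 17, and
+	 * (1 << 18) * 4 is again 1 MiB; non-power-of-two requests round up */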
+ r100_cp_load_microcode(rdev);
+ r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+ 0, 0x7fffff, RADEON_CP_PACKET2);
+ if (r) {
+ return r;
+ }
+	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
+	 * the rptr copy in system ram */
+ rb_blksz = 9;
+	/* cp will read 128 bytes at a time (4 dwords) */
+ max_fetch = 1;
+ ring->align_mask = 16 - 1;
+	/* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+ pre_write_timer = 64;
+	/* Force a CP_RB_WPTR write if it is written more than once before
+	 * the delay expires
+	 */
+ pre_write_limit = 0;
+	/* Setup the cp cache like this (cache size is 96 dwords):
+	 *	RING		0  to 15
+	 *	INDIRECT1	16 to 79
+	 *	INDIRECT2	80 to 95
+	 * So the ring cache size is 16 dwords (> 2 * max_fetch = 2 * 4 dwords),
+	 * the indirect1 cache size is 64 dwords (> 2 * max_fetch = 2 * 4 dwords),
+	 * the indirect2 cache size is 16 dwords (> 2 * max_fetch = 2 * 4 dwords).
+	 * The idea is that most of the gpu commands go through the indirect1
+	 * buffer, so it gets the bigger cache.
+	 */
+ indirect2_start = 80;
+ indirect1_start = 16;
+ /* cp setup */
+ WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+ tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+ REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+ REG_SET(RADEON_MAX_FETCH, max_fetch));
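+	/* REG_SET(FIELD, v) is assumed to shift v into FIELD's bit position
+	 * and mask it to the field's width, packing all three ring
+	 * parameters into the one CP_RB_CNTL dword */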
+#ifdef __BIG_ENDIAN
+ tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
+
+ /* Set ring address */
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+ /* Force read & write ptr to 0 */
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ ring->wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(R_00070C_CP_RB_RPTR_ADDR,
+ S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+ WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+ if (rdev->wb.enabled)
+ WREG32(R_000770_SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RADEON_RB_NO_UPDATE;
+ WREG32(R_000770_SCRATCH_UMSK, 0);
+ }
+
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ udelay(10);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+	/* Set cp mode to bus mastering & enable cp */
+ WREG32(RADEON_CP_CSQ_MODE,
+ REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+ REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+ WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+ WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
+ WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+	/* at this point everything should be set up correctly to enable master */
+ pci_set_master(rdev->pdev);
+
+ radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ if (r) {
+ DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+ return r;
+ }
+ ring->ready = true;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ if (!ring->rptr_save_reg /* not resuming from suspend */
+ && radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
+ return 0;
+}
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+ if (r100_cp_wait_for_idle(rdev)) {
+ DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
+ }
+ /* Disable ring */
+ r100_cp_disable(rdev);
+ radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+ /* Disable ring */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ WREG32(RADEON_CP_CSQ_MODE, 0);
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ WREG32(R_000770_SCRATCH_UMSK, 0);
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+/*
+ * CS functions
+ */
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg)
+{
+ int r;
+ u32 tile_flags = 0;
+ u32 tmp;
+ struct radeon_cs_reloc *reloc;
+ u32 value;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ value = radeon_get_ib_value(p, idx);
+ tmp = value & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ radeon_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
+ }
+
+ tmp |= tile_flags;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+ } else
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+ return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx)
+{
+ unsigned c, i;
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ int r = 0;
+ volatile uint32_t *ib;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+ c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+		DRM_ERROR("Only 16 vertex buffers are allowed in packet3 %d\n",
+			  pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ track->num_arrays = c;
+ for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize &= 0x7F;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 1].robj = reloc->robj;
+ track->arrays[i + 1].esize = idx_value >> 24;
+ track->arrays[i + 1].esize &= 0x7F;
+ }
+ if (c & 1) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].esize &= 0x7F;
+ }
+ return r;
+}
+
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ const unsigned *auth, unsigned n,
+ radeon_packet0_check_t check)
+{
+ unsigned reg;
+ unsigned i, j, m;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+	/* Check that the register falls into the register range
+	 * determined by the number of entries (n) in the
+	 * safe register bitmap.
+	 */
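+	/* illustrative: each u32 in auth[] covers 32 registers
+	 * (32 regs * 4 bytes = 128 bytes = 1 << 7), so for reg 0x1434 the
+	 * word index is 0x1434 >> 7 = 40 and the bit is
+	 * 1 << ((0x1434 >> 2) & 31) = 1 << 13 */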
+ if (pkt->one_reg_wr) {
+ if ((reg >> 7) > n) {
+ return -EINVAL;
+ }
+ } else {
+ if (((reg + (pkt->count << 2)) >> 7) > n) {
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i <= pkt->count; i++, idx++) {
+ j = (reg >> 7);
+ m = 1 << ((reg >> 2) & 31);
+ if (auth[j] & m) {
+ r = check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ if (pkt->one_reg_wr) {
+ if (!(auth[j] & m)) {
+ break;
+ }
+ } else {
+ reg += 4;
+ }
+ }
+ return 0;
+}
+
+/**
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p: parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL + value
+ * RELOC (P3) - crtc_id in reloc.
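+ *
+ * For illustration only (register choice and values are hypothetical),
+ * the dword stream looks roughly like:
+ *   PACKET0(RADEON_CRTC_GUI_TRIG_VLINE, 0), <vline start/end value>,
+ *   PACKET0(RADEON_WAIT_UNTIL, 0), RADEON_WAIT_CRTC_VLINE,
+ *   PACKET3(NOP) carrying the crtc_id reloc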
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, waitreloc;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg;
+ volatile uint32_t *ib;
+
+ ib = p->ib.ptr;
+
+ /* parse the wait until */
+ r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
+ if (r)
+ return r;
+
+	/* check that it's a wait until with only 1 count */
+ if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+ waitreloc.count != 0) {
+ DRM_ERROR("vline wait had illegal wait until segment\n");
+ return -EINVAL;
+ }
+
+ if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
+ DRM_ERROR("vline wait had illegal wait until\n");
+ return -EINVAL;
+ }
+
+ /* jump over the NOP */
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ if (r)
+ return r;
+
+ h_idx = p->idx - 2;
+ p->idx += waitreloc.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 5);
+ reg = R100_CP_PACKET0_GET_REG(header);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* if the CRTC isn't enabled - we need to nop out the wait until */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ } else if (crtc_id == 1) {
+ switch (reg) {
+ case AVIVO_D1MODE_VLINE_START_END:
+ header &= ~R300_CP_PACKET0_REG_MASK;
+ header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+ break;
+ case RADEON_CRTC_GUI_TRIG_VLINE:
+ header &= ~R300_CP_PACKET0_REG_MASK;
+ header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
+ }
+ ib[h_idx] = header;
+ ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+ }
+
+ return 0;
+}
+
+static int r100_get_vtx_size(uint32_t vtx_fmt)
+{
+ int vtx_size;
+ vtx_size = 2;
+ /* ordered according to bits in spec */
+ if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
+ vtx_size += 3;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
+ vtx_size += 3;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
+ vtx_size += 2;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
+ vtx_size += 2;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
+ vtx_size += 2;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
+ vtx_size += 2;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
+ vtx_size++;
+ /* blend weight */
+ if (vtx_fmt & (0x7 << 15))
+ vtx_size += (vtx_fmt >> 15) & 0x7;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
+ vtx_size += 3;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
+ vtx_size += 2;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
+ vtx_size++;
+ if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
+ vtx_size++;
+ return vtx_size;
+}
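+
+/* Example (illustrative): a vtx_fmt of RADEON_SE_VTX_FMT_W0 |
+ * RADEON_SE_VTX_FMT_FPCOLOR | RADEON_SE_VTX_FMT_ST0 gives
+ * 2 + 1 + 3 + 2 = 8 dwords per vertex (the size starts at 2
+ * before any format bits are counted).
+ */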
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ volatile uint32_t *ib;
+ uint32_t tmp;
+ int r;
+ int i, face;
+ u32 tile_flags = 0;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (reg) {
+ case RADEON_CRTC_GUI_TRIG_VLINE:
+ r = r100_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ break;
+ /* FIXME: only allow PACKET3 blit? easier to check for out of
+ * range access */
+ case RADEON_DST_PITCH_OFFSET:
+ case RADEON_SRC_PITCH_OFFSET:
+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+ if (r)
+ return r;
+ break;
+ case RADEON_RB3D_DEPTHOFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->zb.robj = reloc->robj;
+ track->zb.offset = idx_value;
+ track->zb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case RADEON_RB3D_COLOROFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->cb[0].robj = reloc->robj;
+ track->cb[0].offset = idx_value;
+ track->cb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case RADEON_PP_TXOFFSET_0:
+ case RADEON_PP_TXOFFSET_1:
+ case RADEON_PP_TXOFFSET_2:
+ i = (reg - RADEON_PP_TXOFFSET_0) / 24;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_TXO_MICRO_TILE_X2;
+
+ tmp = idx_value & ~(0x7 << 2);
+ tmp |= tile_flags;
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ } else
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_CUBIC_OFFSET_T0_0:
+ case RADEON_PP_CUBIC_OFFSET_T0_1:
+ case RADEON_PP_CUBIC_OFFSET_T0_2:
+ case RADEON_PP_CUBIC_OFFSET_T0_3:
+ case RADEON_PP_CUBIC_OFFSET_T0_4:
+ i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->textures[0].cube_info[i].offset = idx_value;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[0].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_CUBIC_OFFSET_T1_0:
+ case RADEON_PP_CUBIC_OFFSET_T1_1:
+ case RADEON_PP_CUBIC_OFFSET_T1_2:
+ case RADEON_PP_CUBIC_OFFSET_T1_3:
+ case RADEON_PP_CUBIC_OFFSET_T1_4:
+ i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->textures[1].cube_info[i].offset = idx_value;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[1].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_CUBIC_OFFSET_T2_0:
+ case RADEON_PP_CUBIC_OFFSET_T2_1:
+ case RADEON_PP_CUBIC_OFFSET_T2_2:
+ case RADEON_PP_CUBIC_OFFSET_T2_3:
+ case RADEON_PP_CUBIC_OFFSET_T2_4:
+ i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->textures[2].cube_info[i].offset = idx_value;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[2].cube_info[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case RADEON_RE_WIDTH_HEIGHT:
+ track->maxy = ((idx_value >> 16) & 0x7FF);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_COLORPITCH:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ } else
+ ib[idx] = idx_value;
+
+ track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ track->cb_dirty = true;
+ break;
+ case RADEON_RB3D_DEPTHPITCH:
+ track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_CNTL:
+ switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+ case 7:
+ case 8:
+ case 9:
+ case 11:
+ case 12:
+ track->cb[0].cpp = 1;
+ break;
+ case 3:
+ case 4:
+ case 15:
+ track->cb[0].cpp = 2;
+ break;
+ case 6:
+ track->cb[0].cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ return -EINVAL;
+ }
+ track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_ZSTENCILCNTL:
+ switch (idx_value & 0xf) {
+ case 0:
+ track->zb.cpp = 2;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 9:
+ case 11:
+ track->zb.cpp = 4;
+ break;
+ default:
+ break;
+ }
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_ZPASS_ADDR:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case RADEON_PP_CNTL:
+ {
+ uint32_t temp = idx_value >> 4;
+ for (i = 0; i < track->num_texture; i++)
+ track->textures[i].enabled = !!(temp & (1 << i));
+ track->tex_dirty = true;
+ }
+ break;
+ case RADEON_SE_VF_CNTL:
+ track->vap_vf_cntl = idx_value;
+ break;
+ case RADEON_SE_VTX_FMT:
+ track->vtx_size = r100_get_vtx_size(idx_value);
+ break;
+ case RADEON_PP_TEX_SIZE_0:
+ case RADEON_PP_TEX_SIZE_1:
+ case RADEON_PP_TEX_SIZE_2:
+ i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
+ track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+ track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_TEX_PITCH_0:
+ case RADEON_PP_TEX_PITCH_1:
+ case RADEON_PP_TEX_PITCH_2:
+ i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
+ track->textures[i].pitch = idx_value + 32;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_TXFILTER_0:
+ case RADEON_PP_TXFILTER_1:
+ case RADEON_PP_TXFILTER_2:
+ i = (reg - RADEON_PP_TXFILTER_0) / 24;
+ track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
+ >> RADEON_MAX_MIP_LEVEL_SHIFT);
+ tmp = (idx_value >> 23) & 0x7;
+ if (tmp == 2 || tmp == 6)
+ track->textures[i].roundup_w = false;
+ tmp = (idx_value >> 27) & 0x7;
+ if (tmp == 2 || tmp == 6)
+ track->textures[i].roundup_h = false;
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_TXFORMAT_0:
+ case RADEON_PP_TXFORMAT_1:
+ case RADEON_PP_TXFORMAT_2:
+ i = (reg - RADEON_PP_TXFORMAT_0) / 24;
+ if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
+ track->textures[i].use_pitch = 1;
+ } else {
+ track->textures[i].use_pitch = 0;
+ track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+ track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ }
+ if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+ track->textures[i].tex_coord_type = 2;
+ switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+ case RADEON_TXFORMAT_I8:
+ case RADEON_TXFORMAT_RGB332:
+ case RADEON_TXFORMAT_Y8:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case RADEON_TXFORMAT_AI88:
+ case RADEON_TXFORMAT_ARGB1555:
+ case RADEON_TXFORMAT_RGB565:
+ case RADEON_TXFORMAT_ARGB4444:
+ case RADEON_TXFORMAT_VYUY422:
+ case RADEON_TXFORMAT_YVYU422:
+ case RADEON_TXFORMAT_SHADOW16:
+ case RADEON_TXFORMAT_LDUDV655:
+ case RADEON_TXFORMAT_DUDV88:
+ track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case RADEON_TXFORMAT_ARGB8888:
+ case RADEON_TXFORMAT_RGBA8888:
+ case RADEON_TXFORMAT_SHADOW32:
+ case RADEON_TXFORMAT_LDUDUV8888:
+ track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case RADEON_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case RADEON_TXFORMAT_DXT23:
+ case RADEON_TXFORMAT_DXT45:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
+ }
+ track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+ track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+ track->tex_dirty = true;
+ break;
+ case RADEON_PP_CUBIC_FACES_0:
+ case RADEON_PP_CUBIC_FACES_1:
+ case RADEON_PP_CUBIC_FACES_2:
+ tmp = idx_value;
+ i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
+ for (face = 0; face < 4; face++) {
+ track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+ track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+ }
+ track->tex_dirty = true;
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ struct radeon_bo *robj)
+{
+ unsigned idx;
+ u32 value;
+ idx = pkt->idx + 1;
+ value = radeon_get_ib_value(p, idx + 2);
+ if ((value + 1) > radeon_bo_size(robj)) {
+ DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
+ "(need %u have %lu) !\n",
+ value + 1,
+ radeon_bo_size(robj));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ unsigned idx;
+ volatile uint32_t *ib;
+ int r;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx + 1;
+ track = (struct r100_cs_track *)p->track;
+ switch (pkt->opcode) {
+ case PACKET3_3D_LOAD_VBPNTR:
+ r = r100_packet3_load_vbpntr(p, pkt, idx);
+ if (r)
+ return r;
+ break;
+ case PACKET3_INDX_BUFFER:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+ r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+ if (r) {
+ return r;
+ }
+ break;
+ case 0x23:
+ /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+ track->num_arrays = 1;
+ track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
+
+ track->arrays[0].robj = reloc->robj;
+ track->arrays[0].esize = track->vtx_size;
+
+ track->max_indx = radeon_get_ib_value(p, idx+1);
+
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
+ track->immd_dwords = pkt->count - 1;
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+ case PACKET3_3D_DRAW_IMMD:
+ if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ return -EINVAL;
+ }
+ track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ track->immd_dwords = pkt->count - 1;
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+ /* triggers drawing using in-packet vertex data */
+ case PACKET3_3D_DRAW_IMMD_2:
+ if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ return -EINVAL;
+ }
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ track->immd_dwords = pkt->count;
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+	/* triggers drawing of vertex buffers setup elsewhere */
+ case PACKET3_3D_DRAW_VBUF_2:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+	/* triggers drawing using indices to vertex buffer */
+ case PACKET3_3D_DRAW_INDX_2:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+	/* triggers drawing of vertex buffers setup elsewhere */
+ case PACKET3_3D_DRAW_VBUF:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+	/* triggers drawing using indices to vertex buffer */
+ case PACKET3_3D_DRAW_INDX:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r)
+ return r;
+ break;
+	/* hyperz clears; only allowed for the hyperz owner */
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct r100_cs_track *track;
+ int r;
+
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
+ r100_cs_track_clear(p->rdev, track);
+ p->track = track;
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
+ }
+ if (r)
+ return r;
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+ DRM_ERROR("pitch %d\n", t->pitch);
+ DRM_ERROR("use_pitch %d\n", t->use_pitch);
+ DRM_ERROR("width %d\n", t->width);
+ DRM_ERROR("width_11 %d\n", t->width_11);
+ DRM_ERROR("height %d\n", t->height);
+ DRM_ERROR("height_11 %d\n", t->height_11);
+ DRM_ERROR("num levels %d\n", t->num_levels);
+ DRM_ERROR("depth %d\n", t->txdepth);
+ DRM_ERROR("bpp %d\n", t->cpp);
+ DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
+ DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
+ DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
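+
+/* Worked example (hypothetical values, for illustration only): a 16x8 DXT1
+ * mip level yields wblocks = (16 + 3) / 4 = 4 and hblocks = (8 + 3) / 4 = 2,
+ * so sz = 4 * 2 * 8 = 64 bytes. A 4x4 DXT1 level computes wblocks = 1 but is
+ * clamped up to min_wblocks = 4, so it still accounts 4 * 1 * 8 = 32 bytes.
+ */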
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+ struct r100_cs_track *track, unsigned idx)
+{
+ unsigned face, w, h;
+ struct radeon_bo *cube_robj;
+ unsigned long size;
+ unsigned compress_format = track->textures[idx].compress_format;
+
+ for (face = 0; face < 5; face++) {
+ cube_robj = track->textures[idx].cube_info[face].robj;
+ w = track->textures[idx].cube_info[face].width;
+ h = track->textures[idx].cube_info[face].height;
+
+ if (compress_format) {
+ size = r100_track_compress_size(compress_format, w, h);
+ } else {
+ size = w * h;
+ }
+ size *= track->textures[idx].cpp;
+
+ size += track->textures[idx].cube_info[face].offset;
+
+ if (size > radeon_bo_size(cube_robj)) {
+ DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
+ r100_cs_track_texture_print(&track->textures[idx]);
+ return -1;
+ }
+ }
+ return 0;
+}
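+
+/* Sizing sketch (hypothetical numbers): a 64x64 non-primary cube face with
+ * cpp = 4 and face offset 0x1000 needs 64 * 64 * 4 + 0x1000 = 0x5000 bytes,
+ * so the backing BO must be at least 0x5000 bytes or the check above fails.
+ */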
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ struct r100_cs_track *track)
+{
+ struct radeon_bo *robj;
+ unsigned long size;
+ unsigned u, i, w, h, d;
+ int ret;
+
+ for (u = 0; u < track->num_texture; u++) {
+ if (!track->textures[u].enabled)
+ continue;
+ if (track->textures[u].lookup_disable)
+ continue;
+ robj = track->textures[u].robj;
+ if (robj == NULL) {
+ DRM_ERROR("No texture bound to unit %u\n", u);
+ return -EINVAL;
+ }
+ size = 0;
+ for (i = 0; i <= track->textures[u].num_levels; i++) {
+ if (track->textures[u].use_pitch) {
+ if (rdev->family < CHIP_R300)
+ w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+ else
+ w = track->textures[u].pitch / (1 << i);
+ } else {
+ w = track->textures[u].width;
+ if (rdev->family >= CHIP_RV515)
+ w |= track->textures[u].width_11;
+ w = w / (1 << i);
+ if (track->textures[u].roundup_w)
+ w = roundup_pow_of_two(w);
+ }
+ h = track->textures[u].height;
+ if (rdev->family >= CHIP_RV515)
+ h |= track->textures[u].height_11;
+ h = h / (1 << i);
+ if (track->textures[u].roundup_h)
+ h = roundup_pow_of_two(h);
+ if (track->textures[u].tex_coord_type == 1) {
+ d = (1 << track->textures[u].txdepth) / (1 << i);
+ if (!d)
+ d = 1;
+ } else {
+ d = 1;
+ }
+ if (track->textures[u].compress_format) {
+ /* compressed textures are block based */
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+ } else {
+ size += w * h * d;
+ }
+ }
+ size *= track->textures[u].cpp;
+
+ switch (track->textures[u].tex_coord_type) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (track->separate_cube) {
+ ret = r100_cs_track_cube(rdev, track, u);
+ if (ret)
+ return ret;
+ } else {
+ size *= 6;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(robj)) {
+ DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
+ r100_cs_track_texture_print(&track->textures[u]);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
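+
+/* Mip accounting example (assumed values): a 16x16 2D texture with
+ * num_levels = 1 walks levels 0 and 1, accumulating 16*16 + 8*8 = 320
+ * texels; with cpp = 4 the bound BO must therefore hold at least
+ * 320 * 4 = 1280 bytes.
+ */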
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i;
+ unsigned long size;
+ unsigned prim_walk;
+ unsigned nverts;
+ unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+ if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
+ if (track->cb[i].robj == NULL) {
+ DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ return -EINVAL;
+ }
+ size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+ size += track->cb[i].offset;
+ if (size > radeon_bo_size(track->cb[i].robj)) {
+ DRM_ERROR("[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->cb_dirty = false;
+
+ if (track->zb_dirty && track->z_enabled) {
+ if (track->zb.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for z buffer !\n");
+ return -EINVAL;
+ }
+ size = track->zb.pitch * track->zb.cpp * track->maxy;
+ size += track->zb.offset;
+ if (size > radeon_bo_size(track->zb.robj)) {
+ DRM_ERROR("[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->zb_dirty = false;
+
+ if (track->aa_dirty && track->aaresolve) {
+ if (track->aa.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ return -EINVAL;
+ }
+ /* I believe the format comes from colorbuffer0. */
+ size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+ size += track->aa.offset;
+ if (size > radeon_bo_size(track->aa.robj)) {
+ DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->aa_dirty = false;
+
+ prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+ if (track->vap_vf_cntl & (1 << 14)) {
+ nverts = track->vap_alt_nverts;
+ } else {
+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ }
+ switch (prim_walk) {
+ case 1:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * track->max_indx * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ DRM_ERROR("Max indices %u\n", track->max_indx);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 2:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * (nverts - 1) * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 3:
+ size = track->vtx_size * nverts;
+ if (size != track->immd_dwords) {
+ DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
+ return -EINVAL;
+ }
+
+ if (track->tex_dirty) {
+ track->tex_dirty = false;
+ return r100_cs_track_texture_check(rdev, track);
+ }
+ return 0;
+}
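+
+/* Illustration of the PRIM_WALK == 3 (immediate) branch, with made-up
+ * register values: VAP_VF_CNTL = (8 << 16) | (3 << 4) encodes nverts = 8
+ * in-packet vertices; with vtx_size = 4 dwords per vertex the packet body
+ * must carry exactly 8 * 4 = 32 dwords, i.e. immd_dwords == 32.
+ */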
+
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i, face;
+
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ track->tex_dirty = true;
+ track->aa_dirty = true;
+
+ if (rdev->family < CHIP_R300) {
+ track->num_cb = 1;
+ if (rdev->family <= CHIP_RS200)
+ track->num_texture = 3;
+ else
+ track->num_texture = 6;
+ track->maxy = 2048;
+ track->separate_cube = 1;
+ } else {
+ track->num_cb = 4;
+ track->num_texture = 16;
+ track->maxy = 4096;
+ track->separate_cube = 0;
+ track->aaresolve = false;
+ track->aa.robj = NULL;
+ }
+
+ for (i = 0; i < track->num_cb; i++) {
+ track->cb[i].robj = NULL;
+ track->cb[i].pitch = 8192;
+ track->cb[i].cpp = 16;
+ track->cb[i].offset = 0;
+ }
+ track->z_enabled = true;
+ track->zb.robj = NULL;
+ track->zb.pitch = 8192;
+ track->zb.cpp = 4;
+ track->zb.offset = 0;
+ track->vtx_size = 0x7F;
+ track->immd_dwords = 0xFFFFFFFFUL;
+ track->num_arrays = 11;
+ track->max_indx = 0x00FFFFFFUL;
+ for (i = 0; i < track->num_arrays; i++) {
+ track->arrays[i].robj = NULL;
+ track->arrays[i].esize = 0x7F;
+ }
+ for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ track->textures[i].pitch = 16536;
+ track->textures[i].width = 16536;
+ track->textures[i].height = 16536;
+ track->textures[i].width_11 = 1 << 11;
+ track->textures[i].height_11 = 1 << 11;
+ track->textures[i].num_levels = 12;
+ if (rdev->family <= CHIP_RS200) {
+ track->textures[i].tex_coord_type = 0;
+ track->textures[i].txdepth = 0;
+ } else {
+ track->textures[i].txdepth = 16;
+ track->textures[i].tex_coord_type = 1;
+ }
+ track->textures[i].cpp = 64;
+ track->textures[i].robj = NULL;
+ /* CS IB emission code makes sure texture unit are disabled */
+ track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
+ track->textures[i].roundup_w = true;
+ track->textures[i].roundup_h = true;
+ if (track->separate_cube)
+ for (face = 0; face < 5; face++) {
+ track->textures[i].cube_info[face].robj = NULL;
+ track->textures[i].cube_info[face].width = 16536;
+ track->textures[i].cube_info[face].height = 16536;
+ track->textures[i].cube_info[face].offset = 0;
+ }
+ }
+}
+
+/*
+ * Global GPU functions
+ */
+static void r100_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+
+ if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+ rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+ }
+
+ if (rdev->family == CHIP_RV100 ||
+ rdev->family == CHIP_RS100 ||
+ rdev->family == CHIP_RS200) {
+ rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+ }
+}
+
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+ if (tmp >= n) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+ printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
+ " Bad things might happen.\n");
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(RADEON_RBBM_STATUS);
+ if (!(tmp & RADEON_RBBM_ACTIVE)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 rbbm_status;
+
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
+void r100_bm_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
+ pci_clear_master(rdev->pdev);
+ mdelay(1);
+}
+
+int r100_asic_reset(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
+
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
+ /* Check if GPU is idle */
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ ret = -1;
+ } else {
+ dev_info(rdev->dev, "GPU reset succeeded\n");
+ }
+ r100_mc_resume(rdev, &save);
+ return ret;
+}
+
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ bool force_dac2 = false;
+ u32 tmp;
+
+ /* set these so they don't interfere with anything */
+ WREG32(RADEON_OV0_SCALE_CNTL, 0);
+ WREG32(RADEON_SUBPIC_CNTL, 0);
+ WREG32(RADEON_VIPH_CONTROL, 0);
+ WREG32(RADEON_I2C_CNTL_1, 0);
+ WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+ WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+ WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+ /* always set up dac2 on rn50 and some rv100 as lots
+ * of servers seem to wire it up to a VGA port but
+ * don't report it in the bios connector
+ * table.
+ */
+ switch (dev->pdev->device) {
+ /* RN50 */
+ case 0x515e:
+ case 0x5969:
+ force_dac2 = true;
+ break;
+ /* RV100*/
+ case 0x5159:
+ case 0x515a:
+ /* DELL triple head servers */
+ if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((dev->pdev->subsystem_device == 0x016c) ||
+ (dev->pdev->subsystem_device == 0x016d) ||
+ (dev->pdev->subsystem_device == 0x016e) ||
+ (dev->pdev->subsystem_device == 0x016f) ||
+ (dev->pdev->subsystem_device == 0x0170) ||
+ (dev->pdev->subsystem_device == 0x017d) ||
+ (dev->pdev->subsystem_device == 0x017e) ||
+ (dev->pdev->subsystem_device == 0x0183) ||
+ (dev->pdev->subsystem_device == 0x018a) ||
+ (dev->pdev->subsystem_device == 0x019a)))
+ force_dac2 = true;
+ break;
+ }
+
+ if (force_dac2) {
+ u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+ /* For CRT on DAC2, don't turn it on if the BIOS didn't
+ * enable it, even if it's detected.
+ */
+
+ /* force it to crtc0 */
+ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+ /* set up the TV DAC */
+ tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+ RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK);
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2 |
+ (0x58 << 16));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+
+ /* switch PM block to ACPI mode */
+ tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+ tmp &= ~RADEON_PM_MODE_SEL;
+ WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+}
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_is_ddr = false;
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->mc.vram_is_ddr = true;
+ else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+ rdev->mc.vram_is_ddr = true;
+ if ((rdev->family == CHIP_RV100) ||
+ (rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & RV100_HALF_MODE) {
+ rdev->mc.vram_width = 32;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ rdev->mc.vram_width /= 4;
+ rdev->mc.vram_is_ddr = true;
+ }
+ } else if (rdev->family <= CHIP_RV280) {
+ tmp = RREG32(RADEON_MEM_CNTL);
+ if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+ rdev->mc.vram_width = 128;
+ } else {
+ rdev->mc.vram_width = 64;
+ }
+ } else {
+ /* newer IGPs */
+ rdev->mc.vram_width = 128;
+ }
+}
+
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
+{
+ u32 aper_size;
+ u8 byte;
+
+ aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+ /* Set HDP_APER_CNTL only on cards that are known not to be broken,
+ * that is, those with the 2nd generation multifunction PCI interface.
+ */
+ if (rdev->family == CHIP_RV280 ||
+ rdev->family >= CHIP_RV350) {
+ WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+ ~RADEON_HDP_APER_CNTL);
+ DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+ return aper_size * 2;
+ }
+
+ /* Older cards have all sorts of funny issues to deal with. First
+ * check if it's a multifunction card by reading the PCI config
+ * header type... Limit those to one aperture size
+ */
+ pci_read_config_byte(rdev->pdev, 0xe, &byte);
+ if (byte & 0x80) {
+ DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+ DRM_INFO("Limiting VRAM to one aperture\n");
+ return aper_size;
+ }
+
+ /* Single function older card. We read HDP_APER_CNTL to see how the
+ * BIOS has set it up. We don't write this as it's broken on some ASICs,
+ * but we expect the BIOS to have done the right thing (might be too
+ * optimistic...).
+ */
+ if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+ return aper_size * 2;
+ return aper_size;
+}
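+
+/* Note: 0xe is the standard PCI header-type config register and bit 7
+ * flags a multifunction device. Example (hypothetical): byte = 0x80 on a
+ * generation 1 part limits the reported VRAM to a single aperture.
+ */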
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+ u64 config_aper_size;
+
+ /* work out accessible VRAM */
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+ /* FIXME we don't use the second aperture yet when we could use it */
+ if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+ if (rdev->flags & RADEON_IS_IGP) {
+ uint32_t tom;
+ /* read NB_TOM to get the amount of ram stolen for the GPU */
+ tom = RREG32(RADEON_NB_TOM);
+ rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ } else {
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ /* Some production M6 boards report 0
+ * when they have 8 MB
+ */
+ if (rdev->mc.real_vram_size == 0) {
+ rdev->mc.real_vram_size = 8192 * 1024;
+ WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+ }
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ * Novell bug 204882 + along with lots of ubuntu ones
+ */
+ if (rdev->mc.aper_size > config_aper_size)
+ config_aper_size = rdev->mc.aper_size;
+
+ if (config_aper_size > rdev->mc.real_vram_size)
+ rdev->mc.mc_vram_size = config_aper_size;
+ else
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ }
+}
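+
+/* NB_TOM decode, worked with hypothetical values: tom = 0x7fff7800 means
+ * the stolen region spans 64K units 0x7800..0x7fff, so real_vram_size =
+ * ((0x7fff - 0x7800) + 1) << 16 = 0x08000000, i.e. 128 MB.
+ */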
+
+void r100_vga_set_state(struct radeon_device *rdev, bool state)
+{
+ uint32_t temp;
+
+ temp = RREG32(RADEON_CONFIG_CNTL);
+ if (!state) {
+ temp &= ~RADEON_CFG_VGA_RAM_EN;
+ temp |= RADEON_CFG_VGA_IO_DIS;
+ } else {
+ temp &= ~RADEON_CFG_VGA_IO_DIS;
+ }
+ WREG32(RADEON_CONFIG_CNTL, temp);
+}
+
+static void r100_mc_init(struct radeon_device *rdev)
+{
+ u64 base;
+
+ r100_vram_get_type(rdev);
+ r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+ if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+ (void)RREG32(RADEON_CLOCK_CNTL_DATA);
+ (void)RREG32(RADEON_CRTC_GEN_CNTL);
+ }
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+ /* This workaround is necessary on RV100, RS100 and RS200 chips
+ * or the chip could hang on a subsequent access
+ */
+ if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+ mdelay(5);
+ }
+
+ /* This function is required to workaround a hardware bug in some (all?)
+ * revisions of the R300. This workaround should be called after every
+ * CLOCK_CNTL_INDEX register access. If not, register reads afterward
+ * may not be correct.
+ */
+ if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+ uint32_t save, tmp;
+
+ save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+ tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+ WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+ tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+ WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+ }
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t data;
+
+ WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+ r100_pll_errata_after_index(rdev);
+ data = RREG32(RADEON_CLOCK_CNTL_DATA);
+ r100_pll_errata_after_data(rdev);
+ return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+ r100_pll_errata_after_index(rdev);
+ WREG32(RADEON_CLOCK_CNTL_DATA, v);
+ r100_pll_errata_after_data(rdev);
+}
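+
+/* Usage sketch of the indexed PLL access pattern implemented above
+ * (the register number is hypothetical):
+ *
+ *	WREG8(RADEON_CLOCK_CNTL_INDEX, 0x0d & 0x3f);
+ *	r100_pll_errata_after_index(rdev);
+ *	data = RREG32(RADEON_CLOCK_CNTL_DATA);
+ *	r100_pll_errata_after_data(rdev);
+ *
+ * Writes additionally set RADEON_PLL_WR_EN in the index byte.
+ */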
+
+static void r100_set_safe_registers(struct radeon_device *rdev)
+{
+ if (ASIC_IS_RN50(rdev)) {
+ rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
+ } else if (rdev->family < CHIP_R200) {
+ rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
+ } else {
+ r200_set_safe_registers(rdev);
+ }
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t reg, value;
+ unsigned i;
+
+ seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+ seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ for (i = 0; i < 64; i++) {
+ WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+ reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+ WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+ value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+ seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+ }
+ return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ uint32_t rdp, wdp;
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev, ring);
+ rdp = RREG32(RADEON_CP_RB_RPTR);
+ wdp = RREG32(RADEON_CP_RB_WPTR);
+ count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ if (ring->ready) {
+ for (j = 0; j <= count; j++) {
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ }
+ }
+ return 0;
+}
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t csq_stat, csq2_stat, tmp;
+ unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+ unsigned i;
+
+ seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+ csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+ csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+ r_rptr = (csq_stat >> 0) & 0x3ff;
+ r_wptr = (csq_stat >> 10) & 0x3ff;
+ ib1_rptr = (csq_stat >> 20) & 0x3ff;
+ ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+ ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+ ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+ seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+ seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+ seq_printf(m, "Ring rptr %u\n", r_rptr);
+ seq_printf(m, "Ring wptr %u\n", r_wptr);
+ seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+ seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+ seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+ seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+ /* FIXME: 0, 128, 640 depend on fifo setup see cp_init_kms
+ * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
+ seq_printf(m, "Ring fifo:\n");
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+ }
+ seq_printf(m, "Indirect1 fifo:\n");
+ for (i = 256; i <= 512; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+ }
+ seq_printf(m, "Indirect2 fifo:\n");
+ for (i = 640; i < ib2_wptr; i++) {
+ WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+ tmp = RREG32(RADEON_CP_CSQ_DATA);
+ seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+ }
+ return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+ seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_FB_LOCATION);
+ seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_AGP_LOCATION);
+ seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AGP_BASE);
+ seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(0x01D0);
+ seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AIC_LO_ADDR);
+ seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_AIC_HI_ADDR);
+ seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+ tmp = RREG32(0x01E4);
+ seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+ {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+ {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+ {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+ {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+ return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+ return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size)
+{
+ int surf_index = reg * 16;
+ int flags = 0;
+
+ if (rdev->family <= CHIP_RS200) {
+ if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+ == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+ flags |= RADEON_SURF_TILE_COLOR_BOTH;
+ if (tiling_flags & RADEON_TILING_MACRO)
+ flags |= RADEON_SURF_TILE_COLOR_MACRO;
+ } else if (rdev->family <= CHIP_RV280) {
+ if (tiling_flags & (RADEON_TILING_MACRO))
+ flags |= R200_SURF_TILE_COLOR_MACRO;
+ if (tiling_flags & RADEON_TILING_MICRO)
+ flags |= R200_SURF_TILE_COLOR_MICRO;
+ } else {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ flags |= R300_SURF_TILE_MACRO;
+ if (tiling_flags & RADEON_TILING_MICRO)
+ flags |= R300_SURF_TILE_MICRO;
+ }
+
+ if (tiling_flags & RADEON_TILING_SWAP_16BIT)
+ flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
+ if (tiling_flags & RADEON_TILING_SWAP_32BIT)
+ flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
+
+ /* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
+ if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
+ if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
+ if (ASIC_IS_RN50(rdev))
+ pitch /= 16;
+ }
+
+ /* r100/r200 divide by 16 */
+ if (rdev->family < CHIP_R300)
+ flags |= pitch / 16;
+ else
+ flags |= pitch / 8;
+
+ DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+ WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
+ WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
+ WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
+ return 0;
+}
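+
+/* Pitch encoding example (hypothetical surface): with a linear pitch of
+ * 1024 bytes, r100/r200 parts fold pitch / 16 = 64 into the flags dword
+ * while r300+ use pitch / 8 = 128; a byte-swapped untiled surface on RN50
+ * first divides the pitch by 16 before that encoding.
+ */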
+
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+ int surf_index = reg * 16;
+ WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
+}
+
+void r100_bandwidth_update(struct radeon_device *rdev)
+{
+ fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+ fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+ fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+ uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+ fixed20_12 memtcas_ff[8] = {
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init(0),
+ };
+ fixed20_12 memtcas_rs480_ff[8] = {
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init_half(3),
+ };
+ fixed20_12 memtcas2_ff[8] = {
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ };
+ fixed20_12 memtrbs[8] = {
+ dfixed_init(1),
+ dfixed_init_half(1),
+ dfixed_init(2),
+ dfixed_init_half(2),
+ dfixed_init(3),
+ dfixed_init_half(3),
+ dfixed_init(4),
+ dfixed_init_half(4)
+ };
+ fixed20_12 memtrbs_r4xx[8] = {
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ dfixed_init(8),
+ dfixed_init(9),
+ dfixed_init(10),
+ dfixed_init(11)
+ };
+ fixed20_12 min_mem_eff;
+ fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+ fixed20_12 cur_latency_mclk, cur_latency_sclk;
+ fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+ disp_drain_rate2, read_return_rate;
+ fixed20_12 time_disp1_drop_priority;
+ int c;
+ int cur_size = 16; /* in octawords */
+ int critical_point = 0, critical_point2;
+/* uint32_t read_return_rate, time_disp1_drop_priority; */
+ int stop_req, max_stop_req;
+ struct drm_display_mode *mode1 = NULL;
+ struct drm_display_mode *mode2 = NULL;
+ uint32_t pixel_bytes1 = 0;
+ uint32_t pixel_bytes2 = 0;
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled) {
+ mode1 = &rdev->mode_info.crtcs[0]->base.mode;
+ pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+ }
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ if (rdev->mode_info.crtcs[1]->base.enabled) {
+ mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+ pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+ }
+ }
+
+ min_mem_eff.full = dfixed_const_8(0);
+ if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+ uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+ mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+ mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+ /* check crtc enables */
+ if (mode2)
+ mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+ if (mode1)
+ mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+ WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+ }
+
+ /*
+ * determine if there is enough bandwidth for the current mode
+ */
+ sclk_ff = rdev->pm.sclk;
+ mclk_ff = rdev->pm.mclk;
+
+ temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+ temp_ff.full = dfixed_const(temp);
+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
+
+ pix_clk.full = 0;
+ pix_clk2.full = 0;
+ peak_disp_bw.full = 0;
+ if (mode1) {
+ temp_ff.full = dfixed_const(1000);
+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = dfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes1);
+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
+ }
+ if (mode2) {
+ temp_ff.full = dfixed_const(1000);
+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes2);
+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
+ }
+
+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
+ if (peak_disp_bw.full >= mem_bw.full) {
+ DRM_ERROR("You may not have enough display bandwidth for current mode\n"
+ "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
+ }
+
+ /* Get values from the MEM_TIMING_CNTL register...converting its contents. */
+ temp = RREG32(RADEON_MEM_TIMING_CNTL);
+ if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+ mem_trcd = ((temp >> 2) & 0x3) + 1;
+ mem_trp = ((temp & 0x3)) + 1;
+ mem_tras = ((temp & 0x70) >> 4) + 1;
+ } else if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350) { /* r300, r350 */
+ mem_trcd = (temp & 0x7) + 1;
+ mem_trp = ((temp >> 8) & 0x7) + 1;
+ mem_tras = ((temp >> 11) & 0xf) + 4;
+ } else if (rdev->family == CHIP_RV350 ||
+ rdev->family <= CHIP_RV380) {
+ /* rv3x0 */
+ mem_trcd = (temp & 0x7) + 3;
+ mem_trp = ((temp >> 8) & 0x7) + 3;
+ mem_tras = ((temp >> 11) & 0xf) + 6;
+ } else if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410) {
+ /* r4xx */
+ mem_trcd = (temp & 0xf) + 3;
+ if (mem_trcd > 15)
+ mem_trcd = 15;
+ mem_trp = ((temp >> 8) & 0xf) + 3;
+ if (mem_trp > 15)
+ mem_trp = 15;
+ mem_tras = ((temp >> 12) & 0x1f) + 6;
+ if (mem_tras > 31)
+ mem_tras = 31;
+ } else { /* RV200, R200 */
+ mem_trcd = (temp & 0x7) + 1;
+ mem_trp = ((temp >> 8) & 0x7) + 1;
+ mem_tras = ((temp >> 12) & 0xf) + 4;
+ }
+ /* convert to FF */
+ trcd_ff.full = dfixed_const(mem_trcd);
+ trp_ff.full = dfixed_const(mem_trp);
+ tras_ff.full = dfixed_const(mem_tras);
+
+ /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
+ temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ data = (temp & (7 << 20)) >> 20;
+ if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family == CHIP_RS480) /* don't think rs400 */
+ tcas_ff = memtcas_rs480_ff[data];
+ else
+ tcas_ff = memtcas_ff[data];
+ } else {
+ tcas_ff = memtcas2_ff[data];
+ }
+
+ if (rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) {
+ /* extra cas latency stored in bits 23-25 0-4 clocks */
+ data = (temp >> 23) & 0x7;
+ if (data < 5)
+ tcas_ff.full += dfixed_const(data);
+ }
+
+ if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ /* on the R300, Tcas is included in Trbs */
+ temp = RREG32(RADEON_MEM_CNTL);
+ data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+ if (data == 1) {
+ if (R300_MEM_USE_CD_CH_ONLY & temp) {
+ temp = RREG32(R300_MC_IND_INDEX);
+ temp &= ~R300_MC_IND_ADDR_MASK;
+ temp |= R300_MC_READ_CNTL_CD_mcind;
+ WREG32(R300_MC_IND_INDEX, temp);
+ temp = RREG32(R300_MC_IND_DATA);
+ data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+ } else {
+ temp = RREG32(R300_MC_READ_CNTL_AB);
+ data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+ }
+ } else {
+ temp = RREG32(R300_MC_READ_CNTL_AB);
+ data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+ }
+ if (rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423)
+ trbs_ff = memtrbs_r4xx[data];
+ else
+ trbs_ff = memtrbs[data];
+ tcas_ff.full += trbs_ff.full;
+ }
+
+ sclk_eff_ff.full = sclk_ff.full;
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ fixed20_12 agpmode_ff;
+ agpmode_ff.full = dfixed_const(radeon_agpmode);
+ temp_ff.full = dfixed_const_666(16);
+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
+ }
+ /* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+ if (ASIC_IS_R300(rdev)) {
+ sclk_delay_ff.full = dfixed_const(250);
+ } else {
+ if ((rdev->family == CHIP_RV100) ||
+ rdev->flags & RADEON_IS_IGP) {
+ if (rdev->mc.vram_is_ddr)
+ sclk_delay_ff.full = dfixed_const(41);
+ else
+ sclk_delay_ff.full = dfixed_const(33);
+ } else {
+ if (rdev->mc.vram_width == 128)
+ sclk_delay_ff.full = dfixed_const(57);
+ else
+ sclk_delay_ff.full = dfixed_const(41);
+ }
+ }
+
+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+ if (rdev->mc.vram_is_ddr) {
+ if (rdev->mc.vram_width == 32) {
+ k1.full = dfixed_const(40);
+ c = 3;
+ } else {
+ k1.full = dfixed_const(20);
+ c = 1;
+ }
+ } else {
+ k1.full = dfixed_const(40);
+ c = 3;
+ }
+
+ temp_ff.full = dfixed_const(2);
+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = dfixed_const(c);
+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = dfixed_const(4);
+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
+ mc_latency_mclk.full += k1.full;
+
+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
+
+ /*
+ HW cursor time assuming worst case of full size colour cursor.
+ */
+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full += trcd_ff.full;
+ if (temp_ff.full < tras_ff.full)
+ temp_ff.full = tras_ff.full;
+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
+
+ temp_ff.full = dfixed_const(cur_size);
+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
+ /*
+ Find the total latency for the display data.
+ */
+ disp_latency_overhead.full = dfixed_const(8);
+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
+ mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+ mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+ if (mc_latency_mclk.full > mc_latency_sclk.full)
+ disp_latency.full = mc_latency_mclk.full;
+ else
+ disp_latency.full = mc_latency_sclk.full;
+
+ /* setup Max GRPH_STOP_REQ default value */
+ if (ASIC_IS_RV100(rdev))
+ max_stop_req = 0x5c;
+ else
+ max_stop_req = 0x7c;
+
+ if (mode1) {
+ /* CRTC1
+ Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+ GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+ */
+ stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+ if (stop_req > max_stop_req)
+ stop_req = max_stop_req;
+
+ /*
+ Find the drain rate of the display buffer.
+ */
+ temp_ff.full = dfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
+
+ /*
+ Find the critical point of the display buffer.
+ */
+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += dfixed_const_half(0);
+
+ critical_point = dfixed_trunc(crit_point_ff);
+
+ if (rdev->disp_priority == 2) {
+ critical_point = 0;
+ }
+
+ /*
+ The critical point should never be above max_stop_req-4. Setting
+ GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+ */
+ if (max_stop_req - critical_point < 4)
+ critical_point = 0;
+
+ if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+ /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
+ critical_point = 0x10;
+ }
+
+ temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+ temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+ temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+ temp &= ~(RADEON_GRPH_START_REQ_MASK);
+ if ((rdev->family == CHIP_R350) &&
+ (stop_req > 0x15)) {
+ stop_req -= 0x10;
+ }
+ temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+ temp |= RADEON_GRPH_BUFFER_SIZE;
+ temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
+ RADEON_GRPH_CRITICAL_AT_SOF |
+ RADEON_GRPH_STOP_CNTL);
+ /*
+ Write the result into the register.
+ */
+ WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+ (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ /* attempt to program RS400 disp regs correctly ??? */
+ temp = RREG32(RS400_DISP1_REG_CNTL);
+ temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+ RS400_DISP1_STOP_REQ_LEVEL_MASK);
+ WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+ (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+ (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+ temp = RREG32(RS400_DMIF_MEM_CNTL1);
+ temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+ RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+ WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+ (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+ (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+ }
+#endif
+
+ DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
+ /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
+ (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+ }
+
+ if (mode2) {
+ u32 grph2_cntl;
+ stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+ if (stop_req > max_stop_req)
+ stop_req = max_stop_req;
+
+ /*
+ Find the drain rate of the display buffer.
+ */
+ temp_ff.full = dfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
+
+ grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+ grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+ grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+ grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+ if ((rdev->family == CHIP_R350) &&
+ (stop_req > 0x15)) {
+ stop_req -= 0x10;
+ }
+ grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+ grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+ grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
+ RADEON_GRPH_CRITICAL_AT_SOF |
+ RADEON_GRPH_STOP_CNTL);
+
+ if ((rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ critical_point2 = 0;
+ } else {
+ temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+ temp_ff.full = dfixed_const(temp);
+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
+ if (sclk_ff.full < temp_ff.full)
+ temp_ff.full = sclk_ff.full;
+
+ read_return_rate.full = temp_ff.full;
+
+ if (mode1) {
+ temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
+ } else {
+ time_disp1_drop_priority.full = 0;
+ }
+ crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += dfixed_const_half(0);
+
+ critical_point2 = dfixed_trunc(crit_point_ff);
+
+ if (rdev->disp_priority == 2) {
+ critical_point2 = 0;
+ }
+
+ if (max_stop_req - critical_point2 < 4)
+ critical_point2 = 0;
+
+ }
+
+ if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+ /* some R300 cards have problem with this set to 0 */
+ critical_point2 = 0x10;
+ }
+
+ WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+ (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+#if 0
+ /* attempt to program RS400 disp2 regs correctly ??? */
+ temp = RREG32(RS400_DISP2_REQ_CNTL1);
+ temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+ RS400_DISP2_STOP_REQ_LEVEL_MASK);
+ WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+ (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+ (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+ temp = RREG32(RS400_DISP2_REQ_CNTL2);
+ temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+ RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+ WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+ (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+ (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+ WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+ WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+ WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
+ WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+ }
+
+ DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
+ (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+ }
+}
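+
+/* Worked numbers for the CRTC1 path (hypothetical 1600x1200 @ 32 bpp mode
+ * with a 162 MHz pixel clock): pixel_bytes1 = 4, so stop_req =
+ * 1600 * 4 / 16 = 400, which is clamped to max_stop_req (0x7c on most
+ * parts), and disp_drain_rate = pix_clk / (16 / 4) = 162 / 4 = 40.5 in
+ * the driver's fixed-point units.
+ */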
+
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ radeon_scratch_free(rdev, scratch);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(scratch, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test succeeded in %d usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ if (ring->rptr_save_reg) {
+ u32 next_rptr = ring->wptr + 2 + 3;
+ radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, ib->length_dw);
+}
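+
+/* Ring layout produced by the function above; the "2 + 3" covers the two
+ * dwords of the scratch write plus the three dwords of the IB packet:
+ *
+ *	ring[wptr + 0] = PACKET0(rptr_save_reg, 0)
+ *	ring[wptr + 1] = next_rptr
+ *	ring[wptr + 2] = PACKET0(RADEON_CP_IB_BASE, 1)
+ *	ring[wptr + 3] = ib->gpu_addr
+ *	ring[wptr + 4] = ib->length_dw
+ */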
+
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ goto free_scratch;
+ }
+ ib.ptr[0] = PACKET0(scratch, 0);
+ ib.ptr[1] = 0xDEADBEEF;
+ ib.ptr[2] = PACKET2(0);
+ ib.ptr[3] = PACKET2(0);
+ ib.ptr[4] = PACKET2(0);
+ ib.ptr[5] = PACKET2(0);
+ ib.ptr[6] = PACKET2(0);
+ ib.ptr[7] = PACKET2(0);
+ ib.length_dw = 8;
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ goto free_ib;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto free_ib;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test succeeded in %u usecs\n", i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+free_ib:
+ radeon_ib_free(rdev, &ib);
+free_scratch:
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+ /* Shutdown CP. We shouldn't need to do that, but better
+ * safe than sorry.
+ */
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ WREG32(R_000740_CP_CSQ_CNTL, 0);
+
+ /* Save few CRTC registers */
+ save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
+ save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
+ save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
+ save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
+ save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
+ }
+
+ /* Disable VGA aperture access */
+ WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
+ /* Disable cursor, overlay, crtc */
+ WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
+ WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
+ S_000054_CRTC_DISPLAY_DIS(1));
+ WREG32(R_000050_CRTC_GEN_CNTL,
+ (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
+ S_000050_CRTC_DISP_REQ_EN_B(1));
+ WREG32(R_000420_OV0_SCALE_CNTL,
+ C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
+ WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
+ S_000360_CUR2_LOCK(1));
+ WREG32(R_0003F8_CRTC2_GEN_CNTL,
+ (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
+ S_0003F8_CRTC2_DISPLAY_DIS(1) |
+ S_0003F8_CRTC2_DISP_REQ_EN_B(1));
+ WREG32(R_000360_CUR2_OFFSET,
+ C_000360_CUR2_LOCK & save->CUR2_OFFSET);
+ }
+}
+
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+ /* Update base address for crtc */
+ WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+ }
+ /* Restore CRTC registers */
+ WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
+ WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
+ WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
+ }
+}
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG8(R_0003C2_GENMO_WT);
+ WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+ int r;
+
+ r = r100_debugfs_mc_info_init(rdev);
+ if (r)
+ dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+
+ /* Stops all mc clients */
+ r100_mc_stop(rdev, &save);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(R_00014C_MC_AGP_LOCATION,
+ S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+ S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+ WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ if (rdev->family > CHIP_RV200)
+ WREG32(R_00015C_AGP_BASE_2,
+ upper_32_bits(rdev->mc.agp_base) & 0xff);
+ } else {
+ WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32(R_000170_AGP_BASE, 0);
+ if (rdev->family > CHIP_RV200)
+ WREG32(R_00015C_AGP_BASE_2, 0);
+ }
+ /* Wait for mc idle */
+ if (r100_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+ /* Program MC; should be a 32-bit limited address space */
+ WREG32(R_000148_MC_FB_LOCATION,
+ S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ r100_mc_resume(rdev, &save);
+}
+
+static void r100_clock_startup(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_legacy_set_clock_gating(rdev, 1);
+ /* We need to force on some of the block */
+ tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+ tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+ WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
+ r100_mc_program(rdev);
+ /* Resume clock */
+ r100_clock_startup(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r100_enable_bm(rdev);
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int r100_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Make sure GART is not working */
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r100_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r100_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = r100_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+/*
+ * Due to how kexec works, it can leave the hw fully initialised when it
+ * boots the new kernel. However doing our init sequence with the CP and
+ * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
+ * do some quick sanity checks and restore sane values to avoid this
+ * problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(RADEON_CP_CSQ_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_RB_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_SCRATCH_UMSK);
+ if (tmp) {
+ WREG32(RADEON_SCRATCH_UMSK, 0);
+ }
+}
+
+int r100_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Register debugfs file specific to this group of asics */
+ r100_debugfs(rdev);
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* sanity check some register to avoid hangs like after kexec */
+ r100_restore_sanity(rdev);
+ /* TODO: disable VGA need to use VGA request */
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if the card is posted or not */
+ if (!radeon_boot_test_post_card(rdev))
+ return -EINVAL;
+ /* Set asic errata */
+ r100_errata(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize VRAM */
+ r100_mc_init(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ r100_set_safe_registers(rdev);
+
+ rdev->accel_working = true;
+ r = r100_startup(rdev);
+ if (r) {
+		/* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
+
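+/*
+ * Registers beyond the mapped MMIO aperture (or accesses forced with
+ * always_indirect) go through the MM_INDEX/MM_DATA pair; the index lock
+ * serialises the two-step access. The PIO helpers further below use the
+ * same index/data fallback on the IO BAR.
+ */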
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
+{
+ if (reg < rdev->rmmio_size && !always_indirect)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
+{
+ if (reg < rdev->rmmio_size && !always_indirect)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
+}
+
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+ if (reg < rdev->rio_mem_size)
+ return ioread32(rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ if (reg < rdev->rio_mem_size)
+ iowrite32(v, rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
new file mode 100644
index 0000000..eb40888
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -0,0 +1,97 @@
+
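+/*
+ * State tracked while validating a command stream: the sizes, formats and
+ * buffer bindings below are accumulated from parsed packets and checked
+ * against the bound buffer objects before the stream is accepted.
+ */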
+#define R100_TRACK_MAX_TEXTURE 3
+#define R200_TRACK_MAX_TEXTURE 6
+#define R300_TRACK_MAX_TEXTURE 16
+
+#define R100_MAX_CB 1
+#define R300_MAX_CB 4
+
+/*
+ * CS functions
+ */
+struct r100_cs_track_cb {
+ struct radeon_bo *robj;
+ unsigned pitch;
+ unsigned cpp;
+ unsigned offset;
+};
+
+struct r100_cs_track_array {
+ struct radeon_bo *robj;
+ unsigned esize;
+};
+
+struct r100_cs_cube_info {
+ struct radeon_bo *robj;
+ unsigned offset;
+ unsigned width;
+ unsigned height;
+};
+
+#define R100_TRACK_COMP_NONE 0
+#define R100_TRACK_COMP_DXT1 1
+#define R100_TRACK_COMP_DXT35 2
+
+struct r100_cs_track_texture {
+ struct radeon_bo *robj;
+ struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
+ unsigned pitch;
+ unsigned width;
+ unsigned height;
+ unsigned num_levels;
+ unsigned cpp;
+ unsigned tex_coord_type;
+ unsigned txdepth;
+ unsigned width_11;
+ unsigned height_11;
+ bool use_pitch;
+ bool enabled;
+ bool lookup_disable;
+ bool roundup_w;
+ bool roundup_h;
+ unsigned compress_format;
+};
+
+struct r100_cs_track {
+ unsigned num_cb;
+ unsigned num_texture;
+ unsigned maxy;
+ unsigned vtx_size;
+ unsigned vap_vf_cntl;
+ unsigned vap_alt_nverts;
+ unsigned immd_dwords;
+ unsigned num_arrays;
+ unsigned max_indx;
+ unsigned color_channel_mask;
+ struct r100_cs_track_array arrays[16];
+ struct r100_cs_track_cb cb[R300_MAX_CB];
+ struct r100_cs_track_cb zb;
+ struct r100_cs_track_cb aa;
+ struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
+ bool z_enabled;
+ bool separate_cube;
+ bool zb_cb_clear;
+ bool blend_read_enable;
+ bool cb_dirty;
+ bool zb_dirty;
+ bool tex_dirty;
+ bool aa_dirty;
+ bool aaresolve;
+};
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
+
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg);
+
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg);
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
new file mode 100644
index 0000000..f0f8ee6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -0,0 +1,869 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R100D_H__
+#define __R100D_H__
+
+#define CP_PACKET0 0x00000000
+#define PACKET0_BASE_INDEX_SHIFT 0
+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
+#define PACKET0_COUNT_SHIFT 16
+#define PACKET0_COUNT_MASK (0x3fff << 16)
+#define CP_PACKET1 0x40000000
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+#define CP_PACKET3 0xC0000000
+#define PACKET3_IT_OPCODE_SHIFT 8
+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
+#define PACKET3_COUNT_SHIFT 16
+#define PACKET3_COUNT_MASK (0x3fff << 16)
+/* PACKET3 op code */
+#define PACKET3_NOP 0x10
+#define PACKET3_3D_DRAW_VBUF 0x28
+#define PACKET3_3D_DRAW_IMMD 0x29
+#define PACKET3_3D_DRAW_INDX 0x2A
+#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
+#define PACKET3_INDX_BUFFER 0x33
+#define PACKET3_3D_DRAW_VBUF_2 0x34
+#define PACKET3_3D_DRAW_IMMD_2 0x35
+#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
+#define PACKET3_BITBLT_MULTI 0x9B
+
+#define PACKET0(reg, n) (CP_PACKET0 | \
+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
+ REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n) (CP_PACKET3 | \
+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
+ REG_SET(PACKET3_COUNT, (n)))
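+
+/*
+ * For both PACKET0 and PACKET3 the count field holds one less than the
+ * number of payload dwords, e.g. PACKET0(reg, 0) is followed by a single
+ * register write and PACKET0(reg, 2) by three.
+ */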
+
+/* Registers */
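+/*
+ * Field helper naming used throughout this file: for each register field,
+ * S_<reg>_<field>(x) shifts a value into place, G_<reg>_<field>(x) extracts
+ * it from a register value, and C_<reg>_<field> is the AND-mask that clears
+ * the field.
+ */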
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
+#define R_000040_GEN_INT_CNTL 0x000040
+#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
+#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
+#define C_000040_CRTC_VBLANK 0xFFFFFFFE
+#define S_000040_CRTC_VLINE(x) (((x) & 0x1) << 1)
+#define G_000040_CRTC_VLINE(x) (((x) >> 1) & 0x1)
+#define C_000040_CRTC_VLINE 0xFFFFFFFD
+#define S_000040_CRTC_VSYNC(x) (((x) & 0x1) << 2)
+#define G_000040_CRTC_VSYNC(x) (((x) >> 2) & 0x1)
+#define C_000040_CRTC_VSYNC 0xFFFFFFFB
+#define S_000040_SNAPSHOT(x) (((x) & 0x1) << 3)
+#define G_000040_SNAPSHOT(x) (((x) >> 3) & 0x1)
+#define C_000040_SNAPSHOT 0xFFFFFFF7
+#define S_000040_FP_DETECT(x) (((x) & 0x1) << 4)
+#define G_000040_FP_DETECT(x) (((x) >> 4) & 0x1)
+#define C_000040_FP_DETECT 0xFFFFFFEF
+#define S_000040_CRTC2_VLINE(x) (((x) & 0x1) << 5)
+#define G_000040_CRTC2_VLINE(x) (((x) >> 5) & 0x1)
+#define C_000040_CRTC2_VLINE 0xFFFFFFDF
+#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
+#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
+#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
+#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
+#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
+#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
+#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
+#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
+#define C_000040_SNAPSHOT2 0xFFFFFF7F
+#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
+#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
+#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
+#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
+#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
+#define C_000040_FP2_DETECT 0xFFFFFBFF
+#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
+#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
+#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
+#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
+#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
+#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
+#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
+#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
+#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
+#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
+#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
+#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
+#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
+#define C_000040_I2C_INT_EN 0xFFFDFFFF
+#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE 0xFFF7FFFF
+#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
+#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
+#define C_000040_VIPH_INT_EN 0xFEFFFFFF
+#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
+#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
+#define C_000040_SW_INT_EN 0xFDFFFFFF
+#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
+#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
+#define C_000040_GEYSERVILLE 0xF7FFFFFF
+#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
+#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
+#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
+#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
+#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
+#define C_000040_DVI_I2C_INT 0xDFFFFFFF
+#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
+#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
+#define C_000040_GUIDMA 0xBFFFFFFF
+#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
+#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
+#define C_000040_VIDDMA 0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS 0x000044
+#define S_000044_CRTC_VBLANK_STAT(x) (((x) & 0x1) << 0)
+#define G_000044_CRTC_VBLANK_STAT(x) (((x) >> 0) & 0x1)
+#define C_000044_CRTC_VBLANK_STAT 0xFFFFFFFE
+#define S_000044_CRTC_VBLANK_STAT_AK(x) (((x) & 0x1) << 0)
+#define G_000044_CRTC_VBLANK_STAT_AK(x) (((x) >> 0) & 0x1)
+#define C_000044_CRTC_VBLANK_STAT_AK 0xFFFFFFFE
+#define S_000044_CRTC_VLINE_STAT(x) (((x) & 0x1) << 1)
+#define G_000044_CRTC_VLINE_STAT(x) (((x) >> 1) & 0x1)
+#define C_000044_CRTC_VLINE_STAT 0xFFFFFFFD
+#define S_000044_CRTC_VLINE_STAT_AK(x) (((x) & 0x1) << 1)
+#define G_000044_CRTC_VLINE_STAT_AK(x) (((x) >> 1) & 0x1)
+#define C_000044_CRTC_VLINE_STAT_AK 0xFFFFFFFD
+#define S_000044_CRTC_VSYNC_STAT(x) (((x) & 0x1) << 2)
+#define G_000044_CRTC_VSYNC_STAT(x) (((x) >> 2) & 0x1)
+#define C_000044_CRTC_VSYNC_STAT 0xFFFFFFFB
+#define S_000044_CRTC_VSYNC_STAT_AK(x) (((x) & 0x1) << 2)
+#define G_000044_CRTC_VSYNC_STAT_AK(x) (((x) >> 2) & 0x1)
+#define C_000044_CRTC_VSYNC_STAT_AK 0xFFFFFFFB
+#define S_000044_SNAPSHOT_STAT(x) (((x) & 0x1) << 3)
+#define G_000044_SNAPSHOT_STAT(x) (((x) >> 3) & 0x1)
+#define C_000044_SNAPSHOT_STAT 0xFFFFFFF7
+#define S_000044_SNAPSHOT_STAT_AK(x) (((x) & 0x1) << 3)
+#define G_000044_SNAPSHOT_STAT_AK(x) (((x) >> 3) & 0x1)
+#define C_000044_SNAPSHOT_STAT_AK 0xFFFFFFF7
+#define S_000044_FP_DETECT_STAT(x) (((x) & 0x1) << 4)
+#define G_000044_FP_DETECT_STAT(x) (((x) >> 4) & 0x1)
+#define C_000044_FP_DETECT_STAT 0xFFFFFFEF
+#define S_000044_FP_DETECT_STAT_AK(x) (((x) & 0x1) << 4)
+#define G_000044_FP_DETECT_STAT_AK(x) (((x) >> 4) & 0x1)
+#define C_000044_FP_DETECT_STAT_AK 0xFFFFFFEF
+#define S_000044_CRTC2_VLINE_STAT(x) (((x) & 0x1) << 5)
+#define G_000044_CRTC2_VLINE_STAT(x) (((x) >> 5) & 0x1)
+#define C_000044_CRTC2_VLINE_STAT 0xFFFFFFDF
+#define S_000044_CRTC2_VLINE_STAT_AK(x) (((x) & 0x1) << 5)
+#define G_000044_CRTC2_VLINE_STAT_AK(x) (((x) >> 5) & 0x1)
+#define C_000044_CRTC2_VLINE_STAT_AK 0xFFFFFFDF
+#define S_000044_CRTC2_VSYNC_STAT(x) (((x) & 0x1) << 6)
+#define G_000044_CRTC2_VSYNC_STAT(x) (((x) >> 6) & 0x1)
+#define C_000044_CRTC2_VSYNC_STAT 0xFFFFFFBF
+#define S_000044_CRTC2_VSYNC_STAT_AK(x) (((x) & 0x1) << 6)
+#define G_000044_CRTC2_VSYNC_STAT_AK(x) (((x) >> 6) & 0x1)
+#define C_000044_CRTC2_VSYNC_STAT_AK 0xFFFFFFBF
+#define S_000044_SNAPSHOT2_STAT(x) (((x) & 0x1) << 7)
+#define G_000044_SNAPSHOT2_STAT(x) (((x) >> 7) & 0x1)
+#define C_000044_SNAPSHOT2_STAT 0xFFFFFF7F
+#define S_000044_SNAPSHOT2_STAT_AK(x) (((x) & 0x1) << 7)
+#define G_000044_SNAPSHOT2_STAT_AK(x) (((x) >> 7) & 0x1)
+#define C_000044_SNAPSHOT2_STAT_AK 0xFFFFFF7F
+#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8)
+#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1)
+#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF
+#define S_000044_CRTC2_VBLANK_STAT(x) (((x) & 0x1) << 9)
+#define G_000044_CRTC2_VBLANK_STAT(x) (((x) >> 9) & 0x1)
+#define C_000044_CRTC2_VBLANK_STAT 0xFFFFFDFF
+#define S_000044_CRTC2_VBLANK_STAT_AK(x) (((x) & 0x1) << 9)
+#define G_000044_CRTC2_VBLANK_STAT_AK(x) (((x) >> 9) & 0x1)
+#define C_000044_CRTC2_VBLANK_STAT_AK 0xFFFFFDFF
+#define S_000044_FP2_DETECT_STAT(x) (((x) & 0x1) << 10)
+#define G_000044_FP2_DETECT_STAT(x) (((x) >> 10) & 0x1)
+#define C_000044_FP2_DETECT_STAT 0xFFFFFBFF
+#define S_000044_FP2_DETECT_STAT_AK(x) (((x) & 0x1) << 10)
+#define G_000044_FP2_DETECT_STAT_AK(x) (((x) >> 10) & 0x1)
+#define C_000044_FP2_DETECT_STAT_AK 0xFFFFFBFF
+#define S_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x) (((x) & 0x1) << 11)
+#define G_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x) (((x) >> 11) & 0x1)
+#define C_000044_VSYNC_DIFF_OVER_LIMIT_STAT 0xFFFFF7FF
+#define S_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x) (((x) & 0x1) << 11)
+#define G_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x) (((x) >> 11) & 0x1)
+#define C_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK 0xFFFFF7FF
+#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12)
+#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1)
+#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF
+#define S_000044_DMA_VIPH0_INT_AK(x) (((x) & 0x1) << 12)
+#define G_000044_DMA_VIPH0_INT_AK(x) (((x) >> 12) & 0x1)
+#define C_000044_DMA_VIPH0_INT_AK 0xFFFFEFFF
+#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13)
+#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1)
+#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF
+#define S_000044_DMA_VIPH1_INT_AK(x) (((x) & 0x1) << 13)
+#define G_000044_DMA_VIPH1_INT_AK(x) (((x) >> 13) & 0x1)
+#define C_000044_DMA_VIPH1_INT_AK 0xFFFFDFFF
+#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14)
+#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1)
+#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF
+#define S_000044_DMA_VIPH2_INT_AK(x) (((x) & 0x1) << 14)
+#define G_000044_DMA_VIPH2_INT_AK(x) (((x) >> 14) & 0x1)
+#define C_000044_DMA_VIPH2_INT_AK 0xFFFFBFFF
+#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15)
+#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1)
+#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF
+#define S_000044_DMA_VIPH3_INT_AK(x) (((x) & 0x1) << 15)
+#define G_000044_DMA_VIPH3_INT_AK(x) (((x) >> 15) & 0x1)
+#define C_000044_DMA_VIPH3_INT_AK 0xFFFF7FFF
+#define S_000044_I2C_INT(x) (((x) & 0x1) << 17)
+#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1)
+#define C_000044_I2C_INT 0xFFFDFFFF
+#define S_000044_I2C_INT_AK(x) (((x) & 0x1) << 17)
+#define G_000044_I2C_INT_AK(x) (((x) >> 17) & 0x1)
+#define C_000044_I2C_INT_AK 0xFFFDFFFF
+#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19)
+#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1)
+#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF
+#define S_000044_GUI_IDLE_STAT_AK(x) (((x) & 0x1) << 19)
+#define G_000044_GUI_IDLE_STAT_AK(x) (((x) >> 19) & 0x1)
+#define C_000044_GUI_IDLE_STAT_AK 0xFFF7FFFF
+#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24)
+#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1)
+#define C_000044_VIPH_INT 0xFEFFFFFF
+#define S_000044_SW_INT(x) (((x) & 0x1) << 25)
+#define G_000044_SW_INT(x) (((x) >> 25) & 0x1)
+#define C_000044_SW_INT 0xFDFFFFFF
+#define S_000044_SW_INT_AK(x) (((x) & 0x1) << 25)
+#define G_000044_SW_INT_AK(x) (((x) >> 25) & 0x1)
+#define C_000044_SW_INT_AK 0xFDFFFFFF
+#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26)
+#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1)
+#define C_000044_SW_INT_SET 0xFBFFFFFF
+#define S_000044_GEYSERVILLE_STAT(x) (((x) & 0x1) << 27)
+#define G_000044_GEYSERVILLE_STAT(x) (((x) >> 27) & 0x1)
+#define C_000044_GEYSERVILLE_STAT 0xF7FFFFFF
+#define S_000044_GEYSERVILLE_STAT_AK(x) (((x) & 0x1) << 27)
+#define G_000044_GEYSERVILLE_STAT_AK(x) (((x) >> 27) & 0x1)
+#define C_000044_GEYSERVILLE_STAT_AK 0xF7FFFFFF
+#define S_000044_HDCP_AUTHORIZED_INT_STAT(x) (((x) & 0x1) << 28)
+#define G_000044_HDCP_AUTHORIZED_INT_STAT(x) (((x) >> 28) & 0x1)
+#define C_000044_HDCP_AUTHORIZED_INT_STAT 0xEFFFFFFF
+#define S_000044_HDCP_AUTHORIZED_INT_AK(x) (((x) & 0x1) << 28)
+#define G_000044_HDCP_AUTHORIZED_INT_AK(x) (((x) >> 28) & 0x1)
+#define C_000044_HDCP_AUTHORIZED_INT_AK 0xEFFFFFFF
+#define S_000044_DVI_I2C_INT_STAT(x) (((x) & 0x1) << 29)
+#define G_000044_DVI_I2C_INT_STAT(x) (((x) >> 29) & 0x1)
+#define C_000044_DVI_I2C_INT_STAT 0xDFFFFFFF
+#define S_000044_DVI_I2C_INT_AK(x) (((x) & 0x1) << 29)
+#define G_000044_DVI_I2C_INT_AK(x) (((x) >> 29) & 0x1)
+#define C_000044_DVI_I2C_INT_AK 0xDFFFFFFF
+#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30)
+#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1)
+#define C_000044_GUIDMA_STAT 0xBFFFFFFF
+#define S_000044_GUIDMA_AK(x) (((x) & 0x1) << 30)
+#define G_000044_GUIDMA_AK(x) (((x) >> 30) & 0x1)
+#define C_000044_GUIDMA_AK 0xBFFFFFFF
+#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31)
+#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1)
+#define C_000044_VIDDMA_STAT 0x7FFFFFFF
+#define S_000044_VIDDMA_AK(x) (((x) & 0x1) << 31)
+#define G_000044_VIDDMA_AK(x) (((x) >> 31) & 0x1)
+#define C_000044_VIDDMA_AK 0x7FFFFFFF
+#define R_000050_CRTC_GEN_CNTL 0x000050
+#define S_000050_CRTC_DBL_SCAN_EN(x) (((x) & 0x1) << 0)
+#define G_000050_CRTC_DBL_SCAN_EN(x) (((x) >> 0) & 0x1)
+#define C_000050_CRTC_DBL_SCAN_EN 0xFFFFFFFE
+#define S_000050_CRTC_INTERLACE_EN(x) (((x) & 0x1) << 1)
+#define G_000050_CRTC_INTERLACE_EN(x) (((x) >> 1) & 0x1)
+#define C_000050_CRTC_INTERLACE_EN 0xFFFFFFFD
+#define S_000050_CRTC_C_SYNC_EN(x) (((x) & 0x1) << 4)
+#define G_000050_CRTC_C_SYNC_EN(x) (((x) >> 4) & 0x1)
+#define C_000050_CRTC_C_SYNC_EN 0xFFFFFFEF
+#define S_000050_CRTC_PIX_WIDTH(x) (((x) & 0xF) << 8)
+#define G_000050_CRTC_PIX_WIDTH(x) (((x) >> 8) & 0xF)
+#define C_000050_CRTC_PIX_WIDTH 0xFFFFF0FF
+#define S_000050_CRTC_ICON_EN(x) (((x) & 0x1) << 15)
+#define G_000050_CRTC_ICON_EN(x) (((x) >> 15) & 0x1)
+#define C_000050_CRTC_ICON_EN 0xFFFF7FFF
+#define S_000050_CRTC_CUR_EN(x) (((x) & 0x1) << 16)
+#define G_000050_CRTC_CUR_EN(x) (((x) >> 16) & 0x1)
+#define C_000050_CRTC_CUR_EN 0xFFFEFFFF
+#define S_000050_CRTC_VSTAT_MODE(x) (((x) & 0x3) << 17)
+#define G_000050_CRTC_VSTAT_MODE(x) (((x) >> 17) & 0x3)
+#define C_000050_CRTC_VSTAT_MODE 0xFFF9FFFF
+#define S_000050_CRTC_CUR_MODE(x) (((x) & 0x7) << 20)
+#define G_000050_CRTC_CUR_MODE(x) (((x) >> 20) & 0x7)
+#define C_000050_CRTC_CUR_MODE 0xFF8FFFFF
+#define S_000050_CRTC_EXT_DISP_EN(x) (((x) & 0x1) << 24)
+#define G_000050_CRTC_EXT_DISP_EN(x) (((x) >> 24) & 0x1)
+#define C_000050_CRTC_EXT_DISP_EN 0xFEFFFFFF
+#define S_000050_CRTC_EN(x) (((x) & 0x1) << 25)
+#define G_000050_CRTC_EN(x) (((x) >> 25) & 0x1)
+#define C_000050_CRTC_EN 0xFDFFFFFF
+#define S_000050_CRTC_DISP_REQ_EN_B(x) (((x) & 0x1) << 26)
+#define G_000050_CRTC_DISP_REQ_EN_B(x) (((x) >> 26) & 0x1)
+#define C_000050_CRTC_DISP_REQ_EN_B 0xFBFFFFFF
+#define R_000054_CRTC_EXT_CNTL 0x000054
+#define S_000054_CRTC_VGA_XOVERSCAN(x) (((x) & 0x1) << 0)
+#define G_000054_CRTC_VGA_XOVERSCAN(x) (((x) >> 0) & 0x1)
+#define C_000054_CRTC_VGA_XOVERSCAN 0xFFFFFFFE
+#define S_000054_VGA_BLINK_RATE(x) (((x) & 0x3) << 1)
+#define G_000054_VGA_BLINK_RATE(x) (((x) >> 1) & 0x3)
+#define C_000054_VGA_BLINK_RATE 0xFFFFFFF9
+#define S_000054_VGA_ATI_LINEAR(x) (((x) & 0x1) << 3)
+#define G_000054_VGA_ATI_LINEAR(x) (((x) >> 3) & 0x1)
+#define C_000054_VGA_ATI_LINEAR 0xFFFFFFF7
+#define S_000054_VGA_128KAP_PAGING(x) (((x) & 0x1) << 4)
+#define G_000054_VGA_128KAP_PAGING(x) (((x) >> 4) & 0x1)
+#define C_000054_VGA_128KAP_PAGING 0xFFFFFFEF
+#define S_000054_VGA_TEXT_132(x) (((x) & 0x1) << 5)
+#define G_000054_VGA_TEXT_132(x) (((x) >> 5) & 0x1)
+#define C_000054_VGA_TEXT_132 0xFFFFFFDF
+#define S_000054_VGA_XCRT_CNT_EN(x) (((x) & 0x1) << 6)
+#define G_000054_VGA_XCRT_CNT_EN(x) (((x) >> 6) & 0x1)
+#define C_000054_VGA_XCRT_CNT_EN 0xFFFFFFBF
+#define S_000054_CRTC_HSYNC_DIS(x) (((x) & 0x1) << 8)
+#define G_000054_CRTC_HSYNC_DIS(x) (((x) >> 8) & 0x1)
+#define C_000054_CRTC_HSYNC_DIS 0xFFFFFEFF
+#define S_000054_CRTC_VSYNC_DIS(x) (((x) & 0x1) << 9)
+#define G_000054_CRTC_VSYNC_DIS(x) (((x) >> 9) & 0x1)
+#define C_000054_CRTC_VSYNC_DIS 0xFFFFFDFF
+#define S_000054_CRTC_DISPLAY_DIS(x) (((x) & 0x1) << 10)
+#define G_000054_CRTC_DISPLAY_DIS(x) (((x) >> 10) & 0x1)
+#define C_000054_CRTC_DISPLAY_DIS 0xFFFFFBFF
+#define S_000054_CRTC_SYNC_TRISTATE(x) (((x) & 0x1) << 11)
+#define G_000054_CRTC_SYNC_TRISTATE(x) (((x) >> 11) & 0x1)
+#define C_000054_CRTC_SYNC_TRISTATE 0xFFFFF7FF
+#define S_000054_CRTC_HSYNC_TRISTATE(x) (((x) & 0x1) << 12)
+#define G_000054_CRTC_HSYNC_TRISTATE(x) (((x) >> 12) & 0x1)
+#define C_000054_CRTC_HSYNC_TRISTATE 0xFFFFEFFF
+#define S_000054_CRTC_VSYNC_TRISTATE(x) (((x) & 0x1) << 13)
+#define G_000054_CRTC_VSYNC_TRISTATE(x) (((x) >> 13) & 0x1)
+#define C_000054_CRTC_VSYNC_TRISTATE 0xFFFFDFFF
+#define S_000054_CRT_ON(x) (((x) & 0x1) << 15)
+#define G_000054_CRT_ON(x) (((x) >> 15) & 0x1)
+#define C_000054_CRT_ON 0xFFFF7FFF
+#define S_000054_VGA_CUR_B_TEST(x) (((x) & 0x1) << 17)
+#define G_000054_VGA_CUR_B_TEST(x) (((x) >> 17) & 0x1)
+#define C_000054_VGA_CUR_B_TEST 0xFFFDFFFF
+#define S_000054_VGA_PACK_DIS(x) (((x) & 0x1) << 18)
+#define G_000054_VGA_PACK_DIS(x) (((x) >> 18) & 0x1)
+#define C_000054_VGA_PACK_DIS 0xFFFBFFFF
+#define S_000054_VGA_MEM_PS_EN(x) (((x) & 0x1) << 19)
+#define G_000054_VGA_MEM_PS_EN(x) (((x) >> 19) & 0x1)
+#define C_000054_VGA_MEM_PS_EN 0xFFF7FFFF
+#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24)
+#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F)
+#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF
+#define R_000148_MC_FB_LOCATION 0x000148
+#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000148_MC_FB_START 0xFFFF0000
+#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000148_MC_FB_TOP 0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION 0x00014C
+#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00014C_MC_AGP_START 0xFFFF0000
+#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00014C_MC_AGP_TOP 0x0000FFFF
+#define R_000170_AGP_BASE 0x000170
+#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000170_AGP_BASE_ADDR 0x00000000
+#define R_00023C_DISPLAY_BASE_ADDR 0x00023C
+#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_00023C_DISPLAY_BASE_ADDR 0x00000000
+#define R_000260_CUR_OFFSET 0x000260
+#define S_000260_CUR_OFFSET(x) (((x) & 0x7FFFFFF) << 0)
+#define G_000260_CUR_OFFSET(x) (((x) >> 0) & 0x7FFFFFF)
+#define C_000260_CUR_OFFSET 0xF8000000
+#define S_000260_CUR_LOCK(x) (((x) & 0x1) << 31)
+#define G_000260_CUR_LOCK(x) (((x) >> 31) & 0x1)
+#define C_000260_CUR_LOCK 0x7FFFFFFF
+#define R_00033C_CRTC2_DISPLAY_BASE_ADDR 0x00033C
+#define S_00033C_CRTC2_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_00033C_CRTC2_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_00033C_CRTC2_DISPLAY_BASE_ADDR 0x00000000
+#define R_000360_CUR2_OFFSET 0x000360
+#define S_000360_CUR2_OFFSET(x) (((x) & 0x7FFFFFF) << 0)
+#define G_000360_CUR2_OFFSET(x) (((x) >> 0) & 0x7FFFFFF)
+#define C_000360_CUR2_OFFSET 0xF8000000
+#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
+#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
+#define C_000360_CUR2_LOCK 0x7FFFFFFF
+#define R_0003C2_GENMO_WT 0x0003C2
+#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
+#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
+#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
+#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1)
+#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1)
+#define C_0003C2_VGA_RAM_EN 0xFD
+#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2)
+#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3)
+#define C_0003C2_VGA_CKSEL 0xF3
+#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5)
+#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1)
+#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF
+#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6)
+#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1)
+#define C_0003C2_VGA_HSYNC_POL 0xBF
+#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7)
+#define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1)
+#define C_0003C2_VGA_VSYNC_POL 0x7F
+#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8
+#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0)
+#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1)
+#define C_0003F8_CRTC2_DBL_SCAN_EN 0xFFFFFFFE
+#define S_0003F8_CRTC2_INTERLACE_EN(x) (((x) & 0x1) << 1)
+#define G_0003F8_CRTC2_INTERLACE_EN(x) (((x) >> 1) & 0x1)
+#define C_0003F8_CRTC2_INTERLACE_EN 0xFFFFFFFD
+#define S_0003F8_CRTC2_SYNC_TRISTATE(x) (((x) & 0x1) << 4)
+#define G_0003F8_CRTC2_SYNC_TRISTATE(x) (((x) >> 4) & 0x1)
+#define C_0003F8_CRTC2_SYNC_TRISTATE 0xFFFFFFEF
+#define S_0003F8_CRTC2_HSYNC_TRISTATE(x) (((x) & 0x1) << 5)
+#define G_0003F8_CRTC2_HSYNC_TRISTATE(x) (((x) >> 5) & 0x1)
+#define C_0003F8_CRTC2_HSYNC_TRISTATE 0xFFFFFFDF
+#define S_0003F8_CRTC2_VSYNC_TRISTATE(x) (((x) & 0x1) << 6)
+#define G_0003F8_CRTC2_VSYNC_TRISTATE(x) (((x) >> 6) & 0x1)
+#define C_0003F8_CRTC2_VSYNC_TRISTATE 0xFFFFFFBF
+#define S_0003F8_CRT2_ON(x) (((x) & 0x1) << 7)
+#define G_0003F8_CRT2_ON(x) (((x) >> 7) & 0x1)
+#define C_0003F8_CRT2_ON 0xFFFFFF7F
+#define S_0003F8_CRTC2_PIX_WIDTH(x) (((x) & 0xF) << 8)
+#define G_0003F8_CRTC2_PIX_WIDTH(x) (((x) >> 8) & 0xF)
+#define C_0003F8_CRTC2_PIX_WIDTH 0xFFFFF0FF
+#define S_0003F8_CRTC2_ICON_EN(x) (((x) & 0x1) << 15)
+#define G_0003F8_CRTC2_ICON_EN(x) (((x) >> 15) & 0x1)
+#define C_0003F8_CRTC2_ICON_EN 0xFFFF7FFF
+#define S_0003F8_CRTC2_CUR_EN(x) (((x) & 0x1) << 16)
+#define G_0003F8_CRTC2_CUR_EN(x) (((x) >> 16) & 0x1)
+#define C_0003F8_CRTC2_CUR_EN 0xFFFEFFFF
+#define S_0003F8_CRTC2_CUR_MODE(x) (((x) & 0x7) << 20)
+#define G_0003F8_CRTC2_CUR_MODE(x) (((x) >> 20) & 0x7)
+#define C_0003F8_CRTC2_CUR_MODE 0xFF8FFFFF
+#define S_0003F8_CRTC2_DISPLAY_DIS(x) (((x) & 0x1) << 23)
+#define G_0003F8_CRTC2_DISPLAY_DIS(x) (((x) >> 23) & 0x1)
+#define C_0003F8_CRTC2_DISPLAY_DIS 0xFF7FFFFF
+#define S_0003F8_CRTC2_EN(x) (((x) & 0x1) << 25)
+#define G_0003F8_CRTC2_EN(x) (((x) >> 25) & 0x1)
+#define C_0003F8_CRTC2_EN 0xFDFFFFFF
+#define S_0003F8_CRTC2_DISP_REQ_EN_B(x) (((x) & 0x1) << 26)
+#define G_0003F8_CRTC2_DISP_REQ_EN_B(x) (((x) >> 26) & 0x1)
+#define C_0003F8_CRTC2_DISP_REQ_EN_B 0xFBFFFFFF
+#define S_0003F8_CRTC2_C_SYNC_EN(x) (((x) & 0x1) << 27)
+#define G_0003F8_CRTC2_C_SYNC_EN(x) (((x) >> 27) & 0x1)
+#define C_0003F8_CRTC2_C_SYNC_EN 0xF7FFFFFF
+#define S_0003F8_CRTC2_HSYNC_DIS(x) (((x) & 0x1) << 28)
+#define G_0003F8_CRTC2_HSYNC_DIS(x) (((x) >> 28) & 0x1)
+#define C_0003F8_CRTC2_HSYNC_DIS 0xEFFFFFFF
+#define S_0003F8_CRTC2_VSYNC_DIS(x) (((x) & 0x1) << 29)
+#define G_0003F8_CRTC2_VSYNC_DIS(x) (((x) >> 29) & 0x1)
+#define C_0003F8_CRTC2_VSYNC_DIS 0xDFFFFFFF
+#define R_000420_OV0_SCALE_CNTL 0x000420
+#define S_000420_OV0_NO_READ_BEHIND_SCAN(x) (((x) & 0x1) << 1)
+#define G_000420_OV0_NO_READ_BEHIND_SCAN(x) (((x) >> 1) & 0x1)
+#define C_000420_OV0_NO_READ_BEHIND_SCAN 0xFFFFFFFD
+#define S_000420_OV0_HORZ_PICK_NEAREST(x) (((x) & 0x1) << 2)
+#define G_000420_OV0_HORZ_PICK_NEAREST(x) (((x) >> 2) & 0x1)
+#define C_000420_OV0_HORZ_PICK_NEAREST 0xFFFFFFFB
+#define S_000420_OV0_VERT_PICK_NEAREST(x) (((x) & 0x1) << 3)
+#define G_000420_OV0_VERT_PICK_NEAREST(x) (((x) >> 3) & 0x1)
+#define C_000420_OV0_VERT_PICK_NEAREST 0xFFFFFFF7
+#define S_000420_OV0_SIGNED_UV(x) (((x) & 0x1) << 4)
+#define G_000420_OV0_SIGNED_UV(x) (((x) >> 4) & 0x1)
+#define C_000420_OV0_SIGNED_UV 0xFFFFFFEF
+#define S_000420_OV0_GAMMA_SEL(x) (((x) & 0x7) << 5)
+#define G_000420_OV0_GAMMA_SEL(x) (((x) >> 5) & 0x7)
+#define C_000420_OV0_GAMMA_SEL 0xFFFFFF1F
+#define S_000420_OV0_SURFACE_FORMAT(x) (((x) & 0xF) << 8)
+#define G_000420_OV0_SURFACE_FORMAT(x) (((x) >> 8) & 0xF)
+#define C_000420_OV0_SURFACE_FORMAT 0xFFFFF0FF
+#define S_000420_OV0_ADAPTIVE_DEINT(x) (((x) & 0x1) << 12)
+#define G_000420_OV0_ADAPTIVE_DEINT(x) (((x) >> 12) & 0x1)
+#define C_000420_OV0_ADAPTIVE_DEINT 0xFFFFEFFF
+#define S_000420_OV0_CRTC_SEL(x) (((x) & 0x1) << 14)
+#define G_000420_OV0_CRTC_SEL(x) (((x) >> 14) & 0x1)
+#define C_000420_OV0_CRTC_SEL 0xFFFFBFFF
+#define S_000420_OV0_BURST_PER_PLANE(x) (((x) & 0x7F) << 16)
+#define G_000420_OV0_BURST_PER_PLANE(x) (((x) >> 16) & 0x7F)
+#define C_000420_OV0_BURST_PER_PLANE 0xFF80FFFF
+#define S_000420_OV0_DOUBLE_BUFFER_REGS(x) (((x) & 0x1) << 24)
+#define G_000420_OV0_DOUBLE_BUFFER_REGS(x) (((x) >> 24) & 0x1)
+#define C_000420_OV0_DOUBLE_BUFFER_REGS 0xFEFFFFFF
+#define S_000420_OV0_BANDWIDTH(x) (((x) & 0x1) << 26)
+#define G_000420_OV0_BANDWIDTH(x) (((x) >> 26) & 0x1)
+#define C_000420_OV0_BANDWIDTH 0xFBFFFFFF
+#define S_000420_OV0_LIN_TRANS_BYPASS(x) (((x) & 0x1) << 28)
+#define G_000420_OV0_LIN_TRANS_BYPASS(x) (((x) >> 28) & 0x1)
+#define C_000420_OV0_LIN_TRANS_BYPASS 0xEFFFFFFF
+#define S_000420_OV0_INT_EMU(x) (((x) & 0x1) << 29)
+#define G_000420_OV0_INT_EMU(x) (((x) >> 29) & 0x1)
+#define C_000420_OV0_INT_EMU 0xDFFFFFFF
+#define S_000420_OV0_OVERLAY_EN(x) (((x) & 0x1) << 30)
+#define G_000420_OV0_OVERLAY_EN(x) (((x) >> 30) & 0x1)
+#define C_000420_OV0_OVERLAY_EN 0xBFFFFFFF
+#define S_000420_OV0_SOFT_RESET(x) (((x) & 0x1) << 31)
+#define G_000420_OV0_SOFT_RESET(x) (((x) >> 31) & 0x1)
+#define C_000420_OV0_SOFT_RESET 0x7FFFFFFF
+#define R_00070C_CP_RB_RPTR_ADDR 0x00070C
+#define S_00070C_RB_RPTR_SWAP(x) (((x) & 0x3) << 0)
+#define G_00070C_RB_RPTR_SWAP(x) (((x) >> 0) & 0x3)
+#define C_00070C_RB_RPTR_SWAP 0xFFFFFFFC
+#define S_00070C_RB_RPTR_ADDR(x) (((x) & 0x3FFFFFFF) << 2)
+#define G_00070C_RB_RPTR_ADDR(x) (((x) >> 2) & 0x3FFFFFFF)
+#define C_00070C_RB_RPTR_ADDR 0x00000003
+#define R_000740_CP_CSQ_CNTL 0x000740
+#define S_000740_CSQ_CNT_PRIMARY(x) (((x) & 0xFF) << 0)
+#define G_000740_CSQ_CNT_PRIMARY(x) (((x) >> 0) & 0xFF)
+#define C_000740_CSQ_CNT_PRIMARY 0xFFFFFF00
+#define S_000740_CSQ_CNT_INDIRECT(x) (((x) & 0xFF) << 8)
+#define G_000740_CSQ_CNT_INDIRECT(x) (((x) >> 8) & 0xFF)
+#define C_000740_CSQ_CNT_INDIRECT 0xFFFF00FF
+#define S_000740_CSQ_MODE(x) (((x) & 0xF) << 28)
+#define G_000740_CSQ_MODE(x) (((x) >> 28) & 0xF)
+#define C_000740_CSQ_MODE 0x0FFFFFFF
+#define R_000770_SCRATCH_UMSK 0x000770
+#define S_000770_SCRATCH_UMSK(x) (((x) & 0x3F) << 0)
+#define G_000770_SCRATCH_UMSK(x) (((x) >> 0) & 0x3F)
+#define C_000770_SCRATCH_UMSK 0xFFFFFFC0
+#define S_000770_SCRATCH_SWAP(x) (((x) & 0x3) << 16)
+#define G_000770_SCRATCH_SWAP(x) (((x) >> 16) & 0x3)
+#define C_000770_SCRATCH_SWAP 0xFFFCFFFF
+#define R_000774_SCRATCH_ADDR 0x000774
+#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5)
+#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF)
+#define C_000774_SCRATCH_ADDR 0x0000001F
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_SE_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_SE_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_SE_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+
+
+#define R_00000D_SCLK_CNTL 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8)
+#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7)
+#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+
+/* PLL regs */
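+/* The registers below live in the clock block and are reached through the
+ * PLL index/data ports (RREG32_PLL/WREG32_PLL), not plain MMIO offsets. */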
+#define SCLK_CNTL 0xd
+#define FORCE_HDP (1 << 17)
+#define CLK_PWRMGT_CNTL 0x14
+#define GLOBAL_PMAN_EN (1 << 10)
+#define DISP_PM (1 << 20)
+#define PLL_PWRMGT_CNTL 0x15
+#define MPLL_TURNOFF (1 << 0)
+#define SPLL_TURNOFF (1 << 1)
+#define PPLL_TURNOFF (1 << 2)
+#define P2PLL_TURNOFF (1 << 3)
+#define TVPLL_TURNOFF (1 << 4)
+#define MOBILE_SU (1 << 16)
+#define SU_SCLK_USE_BCLK (1 << 17)
+#define SCLK_CNTL2 0x1e
+#define REDUCED_SPEED_SCLK_MODE (1 << 16)
+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
+#define MCLK_MISC 0x1f
+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
+#define SCLK_MORE_CNTL 0x35
+#define REDUCED_SPEED_SCLK_EN (1 << 16)
+#define IO_CG_VOLTAGE_DROP (1 << 17)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
+#define VOLTAGE_DROP_SYNC (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN 0xd08
+#define DISP_D3_GRPH_RST (1 << 18)
+#define DISP_D3_SUBPIC_RST (1 << 19)
+#define DISP_D3_OV0_RST (1 << 20)
+#define DISP_D1D2_GRPH_RST (1 << 21)
+#define DISP_D1D2_SUBPIC_RST (1 << 22)
+#define DISP_D1D2_OV0_RST (1 << 23)
+#define DISP_DVO_ENABLE_RST (1 << 24)
+#define TV_ENABLE_RST (1 << 25)
+#define AUTO_PWRUP_EN (1 << 26)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
new file mode 100644
index 0000000..b3807ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "r100d.h"
+#include "r200_reg_safe.h"
+
+#include "r100_track.h"
+
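+/*
+ * Add up the per-vertex dword count selected by the VTX_FMT_0 bits; the
+ * base size of two covers the implicit XY position pair.
+ */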
+static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
+{
+ int vtx_size, i;
+ vtx_size = 2;
+
+ if (vtx_fmt_0 & R200_VTX_Z0)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_W0)
+ vtx_size++;
+ /* blend weight */
+ if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
+ vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
+ if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_N0)
+ vtx_size += 3;
+ if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_SHININESS_0)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_SHININESS_1)
+ vtx_size++;
+ for (i = 0; i < 8; i++) {
+ int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
+ switch (color_size) {
+ case 0: break;
+ case 1: vtx_size++; break;
+ case 2: vtx_size += 3; break;
+ case 3: vtx_size += 4; break;
+ }
+ }
+ if (vtx_fmt_0 & R200_VTX_XY1)
+ vtx_size += 2;
+ if (vtx_fmt_0 & R200_VTX_Z1)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_W1)
+ vtx_size++;
+ if (vtx_fmt_0 & R200_VTX_N1)
+ vtx_size += 3;
+ return vtx_size;
+}
+
+int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ uint32_t size;
+ uint32_t cur_size;
+ int i, num_loops;
+ int r = 0;
+
+ /* radeon pitch is /64 */
+ size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
+ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+ /* Must wait for 2D idle & clean before DMA or hangs might happen */
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (1 << 16));
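+	/* A single blit DMA packet moves at most 0x1FFFFF bytes, so split
+	 * the copy into maximally-sized chunks. */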
+ for (i = 0; i < num_loops; i++) {
+ cur_size = size;
+ if (cur_size > 0x1FFFFF) {
+ cur_size = 0x1FFFFF;
+ }
+ size -= cur_size;
+ radeon_ring_write(ring, PACKET0(0x720, 2));
+ radeon_ring_write(ring, src_offset);
+ radeon_ring_write(ring, dst_offset);
+ radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
+ src_offset += cur_size;
+ dst_offset += cur_size;
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ return r;
+}
+
+
+static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
+{
+ int vtx_size, i, tex_size;
+ vtx_size = 0;
+ for (i = 0; i < 6; i++) {
+ tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
+ if (tex_size > 4)
+ continue;
+ vtx_size += tex_size;
+ }
+ return vtx_size;
+}
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ volatile uint32_t *ib;
+ uint32_t tmp;
+ int r;
+ int i;
+ int face;
+ u32 tile_flags = 0;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+ idx_value = radeon_get_ib_value(p, idx);
+ switch (reg) {
+ case RADEON_CRTC_GUI_TRIG_VLINE:
+ r = r100_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ break;
+	/* FIXME: only allow PACKET3 blit? easier to check for
+	 * out-of-range access */
+ case RADEON_DST_PITCH_OFFSET:
+ case RADEON_SRC_PITCH_OFFSET:
+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+ if (r)
+ return r;
+ break;
+ case RADEON_RB3D_DEPTHOFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->zb.robj = reloc->robj;
+ track->zb.offset = idx_value;
+ track->zb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case RADEON_RB3D_COLOROFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->cb[0].robj = reloc->robj;
+ track->cb[0].offset = idx_value;
+ track->cb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R200_PP_TXOFFSET_0:
+ case R200_PP_TXOFFSET_1:
+ case R200_PP_TXOFFSET_2:
+ case R200_PP_TXOFFSET_3:
+ case R200_PP_TXOFFSET_4:
+ case R200_PP_TXOFFSET_5:
+ i = (reg - R200_PP_TXOFFSET_0) / 24;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R200_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R200_TXO_MICRO_TILE;
+
+ tmp = idx_value & ~(0x7 << 2);
+ tmp |= tile_flags;
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+ } else
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case R200_PP_CUBIC_OFFSET_F1_0:
+ case R200_PP_CUBIC_OFFSET_F2_0:
+ case R200_PP_CUBIC_OFFSET_F3_0:
+ case R200_PP_CUBIC_OFFSET_F4_0:
+ case R200_PP_CUBIC_OFFSET_F5_0:
+ case R200_PP_CUBIC_OFFSET_F1_1:
+ case R200_PP_CUBIC_OFFSET_F2_1:
+ case R200_PP_CUBIC_OFFSET_F3_1:
+ case R200_PP_CUBIC_OFFSET_F4_1:
+ case R200_PP_CUBIC_OFFSET_F5_1:
+ case R200_PP_CUBIC_OFFSET_F1_2:
+ case R200_PP_CUBIC_OFFSET_F2_2:
+ case R200_PP_CUBIC_OFFSET_F3_2:
+ case R200_PP_CUBIC_OFFSET_F4_2:
+ case R200_PP_CUBIC_OFFSET_F5_2:
+ case R200_PP_CUBIC_OFFSET_F1_3:
+ case R200_PP_CUBIC_OFFSET_F2_3:
+ case R200_PP_CUBIC_OFFSET_F3_3:
+ case R200_PP_CUBIC_OFFSET_F4_3:
+ case R200_PP_CUBIC_OFFSET_F5_3:
+ case R200_PP_CUBIC_OFFSET_F1_4:
+ case R200_PP_CUBIC_OFFSET_F2_4:
+ case R200_PP_CUBIC_OFFSET_F3_4:
+ case R200_PP_CUBIC_OFFSET_F4_4:
+ case R200_PP_CUBIC_OFFSET_F5_4:
+ case R200_PP_CUBIC_OFFSET_F1_5:
+ case R200_PP_CUBIC_OFFSET_F2_5:
+ case R200_PP_CUBIC_OFFSET_F3_5:
+ case R200_PP_CUBIC_OFFSET_F4_5:
+ case R200_PP_CUBIC_OFFSET_F5_5:
+ i = (reg - R200_PP_TXOFFSET_0) / 24;
+ face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->textures[i].cube_info[face - 1].offset = idx_value;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ track->textures[i].cube_info[face - 1].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ case RADEON_RE_WIDTH_HEIGHT:
+ track->maxy = ((idx_value >> 16) & 0x7FF);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_COLORPITCH:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ } else
+ ib[idx] = idx_value;
+
+ track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ track->cb_dirty = true;
+ break;
+ case RADEON_RB3D_DEPTHPITCH:
+ track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_CNTL:
+ switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+ case 7:
+ case 8:
+ case 9:
+ case 11:
+ case 12:
+ track->cb[0].cpp = 1;
+ break;
+ case 3:
+ case 4:
+ case 15:
+ track->cb[0].cpp = 2;
+ break;
+ case 6:
+ track->cb[0].cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ return -EINVAL;
+ }
+ if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
+ DRM_ERROR("No support for depth xy offset in kms\n");
+ return -EINVAL;
+ }
+
+ track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_ZSTENCILCNTL:
+ switch (idx_value & 0xf) {
+ case 0:
+ track->zb.cpp = 2;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 9:
+ case 11:
+ track->zb.cpp = 4;
+ break;
+ default:
+ break;
+ }
+ track->zb_dirty = true;
+ break;
+ case RADEON_RB3D_ZPASS_ADDR:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case RADEON_PP_CNTL:
+ {
+ uint32_t temp = idx_value >> 4;
+ for (i = 0; i < track->num_texture; i++)
+ track->textures[i].enabled = !!(temp & (1 << i));
+ track->tex_dirty = true;
+ }
+ break;
+ case RADEON_SE_VF_CNTL:
+ track->vap_vf_cntl = idx_value;
+ break;
+ case 0x210c:
+ /* VAP_VF_MAX_VTX_INDX */
+ track->max_indx = idx_value & 0x00FFFFFFUL;
+ break;
+ case R200_SE_VTX_FMT_0:
+ track->vtx_size = r200_get_vtx_size_0(idx_value);
+ break;
+ case R200_SE_VTX_FMT_1:
+ track->vtx_size += r200_get_vtx_size_1(idx_value);
+ break;
+ case R200_PP_TXSIZE_0:
+ case R200_PP_TXSIZE_1:
+ case R200_PP_TXSIZE_2:
+ case R200_PP_TXSIZE_3:
+ case R200_PP_TXSIZE_4:
+ case R200_PP_TXSIZE_5:
+ i = (reg - R200_PP_TXSIZE_0) / 32;
+ track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+ track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+ track->tex_dirty = true;
+ break;
+ case R200_PP_TXPITCH_0:
+ case R200_PP_TXPITCH_1:
+ case R200_PP_TXPITCH_2:
+ case R200_PP_TXPITCH_3:
+ case R200_PP_TXPITCH_4:
+ case R200_PP_TXPITCH_5:
+ i = (reg - R200_PP_TXPITCH_0) / 32;
+ track->textures[i].pitch = idx_value + 32;
+ track->tex_dirty = true;
+ break;
+ case R200_PP_TXFILTER_0:
+ case R200_PP_TXFILTER_1:
+ case R200_PP_TXFILTER_2:
+ case R200_PP_TXFILTER_3:
+ case R200_PP_TXFILTER_4:
+ case R200_PP_TXFILTER_5:
+ i = (reg - R200_PP_TXFILTER_0) / 32;
+ track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
+ >> R200_MAX_MIP_LEVEL_SHIFT);
+ tmp = (idx_value >> 23) & 0x7;
+ if (tmp == 2 || tmp == 6)
+ track->textures[i].roundup_w = false;
+ tmp = (idx_value >> 27) & 0x7;
+ if (tmp == 2 || tmp == 6)
+ track->textures[i].roundup_h = false;
+ track->tex_dirty = true;
+ break;
+ case R200_PP_TXMULTI_CTL_0:
+ case R200_PP_TXMULTI_CTL_1:
+ case R200_PP_TXMULTI_CTL_2:
+ case R200_PP_TXMULTI_CTL_3:
+ case R200_PP_TXMULTI_CTL_4:
+ case R200_PP_TXMULTI_CTL_5:
+ i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
+ break;
+ case R200_PP_TXFORMAT_X_0:
+ case R200_PP_TXFORMAT_X_1:
+ case R200_PP_TXFORMAT_X_2:
+ case R200_PP_TXFORMAT_X_3:
+ case R200_PP_TXFORMAT_X_4:
+ case R200_PP_TXFORMAT_X_5:
+ i = (reg - R200_PP_TXFORMAT_X_0) / 32;
+ track->textures[i].txdepth = idx_value & 0x7;
+ tmp = (idx_value >> 16) & 0x3;
+		/* 1D/2D, CUBE or 3D */
+ switch (tmp) {
+ case 0:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /* 1D/2D */
+ track->textures[i].tex_coord_type = 0;
+ break;
+ case 1:
+ /* CUBE */
+ track->textures[i].tex_coord_type = 2;
+ break;
+ case 2:
+ /* 3D */
+ track->textures[i].tex_coord_type = 1;
+ break;
+ }
+ track->tex_dirty = true;
+ break;
+ case R200_PP_TXFORMAT_0:
+ case R200_PP_TXFORMAT_1:
+ case R200_PP_TXFORMAT_2:
+ case R200_PP_TXFORMAT_3:
+ case R200_PP_TXFORMAT_4:
+ case R200_PP_TXFORMAT_5:
+ i = (reg - R200_PP_TXFORMAT_0) / 32;
+ if (idx_value & R200_TXFORMAT_NON_POWER2) {
+ track->textures[i].use_pitch = 1;
+ } else {
+ track->textures[i].use_pitch = 0;
+ track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+ track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ }
+ if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ track->textures[i].lookup_disable = true;
+ switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+ case R200_TXFORMAT_I8:
+ case R200_TXFORMAT_RGB332:
+ case R200_TXFORMAT_Y8:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R200_TXFORMAT_AI88:
+ case R200_TXFORMAT_ARGB1555:
+ case R200_TXFORMAT_RGB565:
+ case R200_TXFORMAT_ARGB4444:
+ case R200_TXFORMAT_VYUY422:
+ case R200_TXFORMAT_YVYU422:
+ case R200_TXFORMAT_LDVDU655:
+ case R200_TXFORMAT_DVDU88:
+ case R200_TXFORMAT_AVYU4444:
+ track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R200_TXFORMAT_ARGB8888:
+ case R200_TXFORMAT_RGBA8888:
+ case R200_TXFORMAT_ABGR8888:
+ case R200_TXFORMAT_BGR111110:
+ case R200_TXFORMAT_LDVDU8888:
+ track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R200_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case R200_TXFORMAT_DXT23:
+ case R200_TXFORMAT_DXT45:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ }
+ track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+ track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+ track->tex_dirty = true;
+ break;
+ case R200_PP_CUBIC_FACES_0:
+ case R200_PP_CUBIC_FACES_1:
+ case R200_PP_CUBIC_FACES_2:
+ case R200_PP_CUBIC_FACES_3:
+ case R200_PP_CUBIC_FACES_4:
+ case R200_PP_CUBIC_FACES_5:
+ tmp = idx_value;
+ i = (reg - R200_PP_CUBIC_FACES_0) / 32;
+ for (face = 0; face < 4; face++) {
+ track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+ track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+ }
+ track->tex_dirty = true;
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
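+
+/* Worked example (illustrative, derived from the arithmetic above): each
+ * R200 texture unit spans 24 bytes, with the five F1..F5 cubic face offsets
+ * sitting 4..20 bytes past the unit's TXOFFSET. For
+ * reg == R200_PP_CUBIC_OFFSET_F3_1 this gives
+ *
+ *   i    = (reg - R200_PP_TXOFFSET_0) / 24            = 1  (texture unit 1)
+ *   face = (reg - (i * 24 + R200_PP_TXOFFSET_0)) / 4  = 3  (face F3)
+ *
+ * so cube_info[face - 1] maps faces F1..F5 onto array slots 0..4.
+ */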
+
+void r200_set_safe_registers(struct radeon_device *rdev)
+{
+ rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
+ rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
+}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 0000000..b9b776f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1558 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "r100_track.h"
+#include "r300d.h"
+#include "rv350d.h"
+#include "r300_reg_safe.h"
+
+/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
+ *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
+ *   However, scheduling such a write through the ring seems harmless; I
+ *   suspect the CP read collides with the flush somehow, or maybe the MC,
+ *   hard to tell. (Jerome Glisse)
+ */
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ int i;
+
+	/* Workaround for a HW bug: do the flush twice */
+ for (i = 0; i < 2; i++) {
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+ (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ }
+ mb();
+}
+
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE (1 << 3)
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ addr = (lower_32_bits(addr) >> 8) |
+ ((upper_32_bits(addr) & 0xff) << 24) |
+ R300_PTE_WRITEABLE | R300_PTE_READABLE;
+	/* On x86 we want this to be CPU endian; on powerpc without HW
+	 * swappers it'll get swapped on the way into VRAM, so there is no
+	 * need for cpu_to_le32 on VRAM tables */
+ writel(addr, ((void __iomem *)ptr) + (i * 4));
+ return 0;
+}
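+
+/* Worked example (illustrative): GART pages are page-aligned, so the low
+ * address bits are zero and can carry the permission flags. For
+ * addr == 0x123456000ULL the encoding above yields
+ *
+ *   lower_32_bits(addr) >> 8                  = 0x00234560
+ *   (upper_32_bits(addr) & 0xff) << 24        = 0x01000000
+ *   R300_PTE_WRITEABLE | R300_PTE_READABLE    = 0x0000000C
+ *                                        PTE  = 0x0123456C
+ *
+ * i.e. bits [31:24] hold address bits [39:32], bits [23:0] hold address
+ * bits [31:8], and bits [3:2] are the R/W flags.
+ */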
+
+int rv370_pcie_gart_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.robj) {
+ WARN(1, "RV370 PCIE GART already initialized\n");
+ return 0;
+ }
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r)
+ return r;
+ r = rv370_debugfs_pcie_gart_info_init(rdev);
+ if (r)
+ DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t table_addr;
+ uint32_t tmp;
+ int r;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+	/* discard memory requests outside of the configured range */
+ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+ tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+ table_addr = rdev->gart.table_addr;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+ /* FIXME: setup default page */
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+ /* Clear error */
+ WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ tmp |= RADEON_PCIE_TX_GART_EN;
+ tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ rv370_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ rv370_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+}
+
+void r300_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* Whoever calls radeon_fence_emit should call ring_lock and ask
+	 * for enough space (today the callers are IB scheduling and buffer moves) */
+ /* Write SC register so SC & US assert idle */
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+ radeon_ring_write(ring, 0);
+ /* Flush 3D cache */
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH);
+ /* Wait until IDLE & CLEAN */
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE));
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
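+
+/* Illustrative caller sketch, assuming the radeon_ring_lock()/
+ * radeon_ring_unlock_commit() pairing used elsewhere in this file. The emit
+ * above writes 18 dwords, so the caller must have reserved at least that
+ * much, roughly:
+ *
+ *   r = radeon_ring_lock(rdev, ring, 64);
+ *   if (r)
+ *           return r;
+ *   r300_fence_ring_emit(rdev, fence);
+ *   radeon_ring_unlock_commit(rdev, ring);
+ */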
+
+void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ unsigned gb_tile_config;
+ int r;
+
+	/* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
+ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+ switch(rdev->num_gb_pipes) {
+ case 2:
+ gb_tile_config |= R300_PIPE_COUNT_R300;
+ break;
+ case 3:
+ gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ gb_tile_config |= R300_PIPE_COUNT_R420;
+ break;
+ case 1:
+ default:
+ gb_tile_config |= R300_PIPE_COUNT_RV350;
+ break;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 64);
+ if (r) {
+ return;
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
+ RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(ring, gb_tile_config);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(ring,
+ ((6 << R300_MS_X0_SHIFT) |
+ (6 << R300_MS_Y0_SHIFT) |
+ (6 << R300_MS_X1_SHIFT) |
+ (6 << R300_MS_Y1_SHIFT) |
+ (6 << R300_MS_X2_SHIFT) |
+ (6 << R300_MS_Y2_SHIFT) |
+ (6 << R300_MSBD0_Y_SHIFT) |
+ (6 << R300_MSBD0_X_SHIFT)));
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(ring,
+ ((6 << R300_MS_X3_SHIFT) |
+ (6 << R300_MS_Y3_SHIFT) |
+ (6 << R300_MS_X4_SHIFT) |
+ (6 << R300_MS_Y4_SHIFT) |
+ (6 << R300_MS_X5_SHIFT) |
+ (6 << R300_MS_Y5_SHIFT) |
+ (6 << R300_MSBD1_SHIFT)));
+ radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(ring,
+ R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+ radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(ring,
+ R300_GEOMETRY_ROUND_NEAREST |
+ R300_COLOR_ROUND_NEAREST);
+ radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r300_errata(struct radeon_device *rdev)
+{
+ rdev->pll_errata = 0;
+
+ if (rdev->family == CHIP_R300 &&
+ (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+ rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+ }
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & R300_MC_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+static void r300_gpu_init(struct radeon_device *rdev)
+{
+ uint32_t gb_tile_config, tmp;
+
+ if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+ (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
+ /* r300,r350 */
+ rdev->num_gb_pipes = 2;
+ } else {
+		/* rv350, rv370, rv380, r300 AD, r350 AH */
+ rdev->num_gb_pipes = 1;
+ }
+ rdev->num_z_pipes = 1;
+ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+ switch (rdev->num_gb_pipes) {
+ case 2:
+ gb_tile_config |= R300_PIPE_COUNT_R300;
+ break;
+ case 3:
+ gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ gb_tile_config |= R300_PIPE_COUNT_R420;
+ break;
+ default:
+ case 1:
+ gb_tile_config |= R300_PIPE_COUNT_RV350;
+ break;
+ }
+ WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+ WREG32(R300_RB2D_DSTCACHE_MODE,
+ R300_DC_AUTOFLUSH_ENABLE |
+ R300_DC_DC_DISABLE_IGNORE_PE);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ if (r300_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+ rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+int r300_asic_reset(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
+
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* Resetting the CP seems to be problematic: sometimes it ends up
+	 * hard-locking the computer, but it's necessary for a successful
+	 * reset. More testing and experimenting is needed on R3XX/R4XX to
+	 * find a reliable solution (if any).
+	 */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ ret = -1;
+ } else
+		dev_info(rdev->dev, "GPU reset succeeded\n");
+ r100_mc_resume(rdev, &save);
+ return ret;
+}
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_mc_init(struct radeon_device *rdev)
+{
+ u64 base;
+ u32 tmp;
+
+	/* DDR for all cards after R300 & IGP */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(RADEON_MEM_CNTL);
+ tmp &= R300_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0: rdev->mc.vram_width = 64; break;
+ case 1: rdev->mc.vram_width = 128; break;
+ case 2: rdev->mc.vram_width = 256; break;
+ default: rdev->mc.vram_width = 128; break;
+ }
+ r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ uint32_t link_width_cntl, mask;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* FIXME wait for idle */
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ default:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ }
+
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ RADEON_PCIE_LC_RECONFIG_LATER |
+ RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+ link_width_cntl |= mask;
+ WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ /* wait for lane set to complete */
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
+
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
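+
+/* Illustrative round trip: rv370_set_pcie_lanes() above programs the same
+ * RADEON_PCIE_LC_LINK_WIDTH_X* encodings that this function decodes, so
+ * once the reconfigure completes,
+ *
+ *   rv370_set_pcie_lanes(rdev, 8);
+ *   lanes = rv370_get_pcie_lanes(rdev);
+ *
+ * should report the newly negotiated width (8 in this sketch).
+ */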
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+ seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+ seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+ seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+ seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+ seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+ tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+ seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+ {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+static int r300_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ volatile uint32_t *ib;
+ uint32_t tmp, tile_flags = 0;
+ unsigned i;
+ int r;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch(reg) {
+ case AVIVO_D1MODE_VLINE_START_END:
+ case RADEON_CRTC_GUI_TRIG_VLINE:
+ r = r100_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ break;
+ case RADEON_DST_PITCH_OFFSET:
+ case RADEON_SRC_PITCH_OFFSET:
+ r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+ if (r)
+ return r;
+ break;
+ case R300_RB3D_COLOROFFSET0:
+ case R300_RB3D_COLOROFFSET1:
+ case R300_RB3D_COLOROFFSET2:
+ case R300_RB3D_COLOROFFSET3:
+ i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->cb[i].robj = reloc->robj;
+ track->cb[i].offset = idx_value;
+ track->cb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_ZB_DEPTHOFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->zb.robj = reloc->robj;
+ track->zb.offset = idx_value;
+ track->zb_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_TX_OFFSET_0:
+ case R300_TX_OFFSET_0+4:
+ case R300_TX_OFFSET_0+8:
+ case R300_TX_OFFSET_0+12:
+ case R300_TX_OFFSET_0+16:
+ case R300_TX_OFFSET_0+20:
+ case R300_TX_OFFSET_0+24:
+ case R300_TX_OFFSET_0+28:
+ case R300_TX_OFFSET_0+32:
+ case R300_TX_OFFSET_0+36:
+ case R300_TX_OFFSET_0+40:
+ case R300_TX_OFFSET_0+44:
+ case R300_TX_OFFSET_0+48:
+ case R300_TX_OFFSET_0+52:
+ case R300_TX_OFFSET_0+56:
+ case R300_TX_OFFSET_0+60:
+ i = (reg - R300_TX_OFFSET_0) >> 2;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
+ ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ } else {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
+ track->textures[i].robj = reloc->robj;
+ track->tex_dirty = true;
+ break;
+ /* Tracked registers */
+ case 0x2084:
+ /* VAP_VF_CNTL */
+ track->vap_vf_cntl = idx_value;
+ break;
+ case 0x20B4:
+ /* VAP_VTX_SIZE */
+ track->vtx_size = idx_value & 0x7F;
+ break;
+ case 0x2134:
+ /* VAP_VF_MAX_VTX_INDX */
+ track->max_indx = idx_value & 0x00FFFFFFUL;
+ break;
+ case 0x2088:
+ /* VAP_ALT_NUM_VERTICES - only valid on r500 */
+ if (p->rdev->family < CHIP_RV515)
+ goto fail;
+ track->vap_alt_nverts = idx_value & 0xFFFFFF;
+ break;
+ case 0x43E4:
+ /* SC_SCISSOR1 */
+ track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
+ if (p->rdev->family < CHIP_RV515) {
+ track->maxy -= 1440;
+ }
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ break;
+ case 0x4E00:
+ /* RB3D_CCTL */
+ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+ p->rdev->cmask_filp != p->filp) {
+ DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ return -EINVAL;
+ }
+ track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+ track->cb_dirty = true;
+ break;
+ case 0x4E38:
+ case 0x4E3C:
+ case 0x4E40:
+ case 0x4E44:
+ /* RB3D_COLORPITCH0 */
+ /* RB3D_COLORPITCH1 */
+ /* RB3D_COLORPITCH2 */
+ /* RB3D_COLORPITCH3 */
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
+ i = (reg - 0x4E38) >> 2;
+ track->cb[i].pitch = idx_value & 0x3FFE;
+ switch (((idx_value >> 21) & 0xF)) {
+ case 9:
+ case 11:
+ case 12:
+ track->cb[i].cpp = 1;
+ break;
+ case 3:
+ case 4:
+ case 13:
+ case 15:
+ track->cb[i].cpp = 2;
+ break;
+ case 5:
+ if (p->rdev->family < CHIP_RV515) {
+ DRM_ERROR("Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
+ return -EINVAL;
+ }
+		/* Fall through. */
+ case 6:
+ track->cb[i].cpp = 4;
+ break;
+ case 10:
+ track->cb[i].cpp = 8;
+ break;
+ case 7:
+ track->cb[i].cpp = 16;
+ break;
+ default:
+ DRM_ERROR("Invalid color buffer format (%d) !\n",
+ ((idx_value >> 21) & 0xF));
+ return -EINVAL;
+ }
+ track->cb_dirty = true;
+ break;
+ case 0x4F00:
+ /* ZB_CNTL */
+ if (idx_value & 2) {
+ track->z_enabled = true;
+ } else {
+ track->z_enabled = false;
+ }
+ track->zb_dirty = true;
+ break;
+ case 0x4F10:
+ /* ZB_FORMAT */
+ switch ((idx_value & 0xF)) {
+ case 0:
+ case 1:
+ track->zb.cpp = 2;
+ break;
+ case 2:
+ track->zb.cpp = 4;
+ break;
+ default:
+ DRM_ERROR("Invalid z buffer format (%d) !\n",
+ (idx_value & 0xF));
+ return -EINVAL;
+ }
+ track->zb_dirty = true;
+ break;
+ case 0x4F24:
+ /* ZB_DEPTHPITCH */
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
+ track->zb.pitch = idx_value & 0x3FFC;
+ track->zb_dirty = true;
+ break;
+ case 0x4104:
+ /* TX_ENABLE */
+ for (i = 0; i < 16; i++) {
+ bool enabled;
+
+ enabled = !!(idx_value & (1 << i));
+ track->textures[i].enabled = enabled;
+ }
+ track->tex_dirty = true;
+ break;
+ case 0x44C0:
+ case 0x44C4:
+ case 0x44C8:
+ case 0x44CC:
+ case 0x44D0:
+ case 0x44D4:
+ case 0x44D8:
+ case 0x44DC:
+ case 0x44E0:
+ case 0x44E4:
+ case 0x44E8:
+ case 0x44EC:
+ case 0x44F0:
+ case 0x44F4:
+ case 0x44F8:
+ case 0x44FC:
+ /* TX_FORMAT1_[0-15] */
+ i = (reg - 0x44C0) >> 2;
+ tmp = (idx_value >> 25) & 0x3;
+ track->textures[i].tex_coord_type = tmp;
+ switch ((idx_value & 0x1F)) {
+ case R300_TX_FORMAT_X8:
+ case R300_TX_FORMAT_Y4X4:
+ case R300_TX_FORMAT_Z3Y3X2:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R300_TX_FORMAT_X16:
+ case R300_TX_FORMAT_FL_I16:
+ case R300_TX_FORMAT_Y8X8:
+ case R300_TX_FORMAT_Z5Y6X5:
+ case R300_TX_FORMAT_Z6Y5X5:
+ case R300_TX_FORMAT_W4Z4Y4X4:
+ case R300_TX_FORMAT_W1Z5Y5X5:
+ case R300_TX_FORMAT_D3DMFT_CxV8U8:
+ case R300_TX_FORMAT_B8G8_B8G8:
+ case R300_TX_FORMAT_G8R8_G8B8:
+ track->textures[i].cpp = 2;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R300_TX_FORMAT_Y16X16:
+ case R300_TX_FORMAT_FL_I16A16:
+ case R300_TX_FORMAT_Z11Y11X10:
+ case R300_TX_FORMAT_Z10Y11X11:
+ case R300_TX_FORMAT_W8Z8Y8X8:
+ case R300_TX_FORMAT_W2Z10Y10X10:
+ case 0x17:
+ case R300_TX_FORMAT_FL_I32:
+ case 0x1e:
+ track->textures[i].cpp = 4;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R300_TX_FORMAT_W16Z16Y16X16:
+ case R300_TX_FORMAT_FL_R16G16B16A16:
+ case R300_TX_FORMAT_FL_I32A32:
+ track->textures[i].cpp = 8;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R300_TX_FORMAT_FL_R32G32B32A32:
+ track->textures[i].cpp = 16;
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ break;
+ case R300_TX_FORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case R300_TX_FORMAT_ATI2N:
+ if (p->rdev->family < CHIP_R420) {
+ DRM_ERROR("Invalid texture format %u\n",
+ (idx_value & 0x1F));
+ return -EINVAL;
+ }
+ /* The same rules apply as for DXT3/5. */
+		/* Fall through. */
+ case R300_TX_FORMAT_DXT3:
+ case R300_TX_FORMAT_DXT5:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
+ default:
+ DRM_ERROR("Invalid texture format %u\n",
+ (idx_value & 0x1F));
+ return -EINVAL;
+ }
+ track->tex_dirty = true;
+ break;
+ case 0x4400:
+ case 0x4404:
+ case 0x4408:
+ case 0x440C:
+ case 0x4410:
+ case 0x4414:
+ case 0x4418:
+ case 0x441C:
+ case 0x4420:
+ case 0x4424:
+ case 0x4428:
+ case 0x442C:
+ case 0x4430:
+ case 0x4434:
+ case 0x4438:
+ case 0x443C:
+ /* TX_FILTER0_[0-15] */
+ i = (reg - 0x4400) >> 2;
+ tmp = idx_value & 0x7;
+ if (tmp == 2 || tmp == 4 || tmp == 6) {
+ track->textures[i].roundup_w = false;
+ }
+ tmp = (idx_value >> 3) & 0x7;
+ if (tmp == 2 || tmp == 4 || tmp == 6) {
+ track->textures[i].roundup_h = false;
+ }
+ track->tex_dirty = true;
+ break;
+ case 0x4500:
+ case 0x4504:
+ case 0x4508:
+ case 0x450C:
+ case 0x4510:
+ case 0x4514:
+ case 0x4518:
+ case 0x451C:
+ case 0x4520:
+ case 0x4524:
+ case 0x4528:
+ case 0x452C:
+ case 0x4530:
+ case 0x4534:
+ case 0x4538:
+ case 0x453C:
+ /* TX_FORMAT2_[0-15] */
+ i = (reg - 0x4500) >> 2;
+ tmp = idx_value & 0x3FFF;
+ track->textures[i].pitch = tmp + 1;
+ if (p->rdev->family >= CHIP_RV515) {
+ tmp = ((idx_value >> 15) & 1) << 11;
+ track->textures[i].width_11 = tmp;
+ tmp = ((idx_value >> 16) & 1) << 11;
+ track->textures[i].height_11 = tmp;
+
+ /* ATI1N */
+ if (idx_value & (1 << 14)) {
+ /* The same rules apply as for DXT1. */
+ track->textures[i].compress_format =
+ R100_TRACK_COMP_DXT1;
+ }
+ } else if (idx_value & (1 << 14)) {
+ DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ return -EINVAL;
+ }
+ track->tex_dirty = true;
+ break;
+ case 0x4480:
+ case 0x4484:
+ case 0x4488:
+ case 0x448C:
+ case 0x4490:
+ case 0x4494:
+ case 0x4498:
+ case 0x449C:
+ case 0x44A0:
+ case 0x44A4:
+ case 0x44A8:
+ case 0x44AC:
+ case 0x44B0:
+ case 0x44B4:
+ case 0x44B8:
+ case 0x44BC:
+ /* TX_FORMAT0_[0-15] */
+ i = (reg - 0x4480) >> 2;
+ tmp = idx_value & 0x7FF;
+ track->textures[i].width = tmp + 1;
+ tmp = (idx_value >> 11) & 0x7FF;
+ track->textures[i].height = tmp + 1;
+ tmp = (idx_value >> 26) & 0xF;
+ track->textures[i].num_levels = tmp;
+ tmp = idx_value & (1 << 31);
+ track->textures[i].use_pitch = !!tmp;
+ tmp = (idx_value >> 22) & 0xF;
+ track->textures[i].txdepth = tmp;
+ track->tex_dirty = true;
+ break;
+ case R300_ZB_ZPASS_ADDR:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case 0x4e0c:
+ /* RB3D_COLOR_CHANNEL_MASK */
+ track->color_channel_mask = idx_value;
+ track->cb_dirty = true;
+ break;
+ case 0x43a4:
+ /* SC_HYPERZ_EN */
+ /* r300c emits this register - we need to disable hyperz for it
+ * without complaining */
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & 0x1)
+ ib[idx] = idx_value & ~1;
+ }
+ break;
+ case 0x4f1c:
+ /* ZB_BW_CNTL */
+ track->zb_cb_clear = !!(idx_value & (1 << 5));
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & (R300_HIZ_ENABLE |
+ R300_RD_COMP_ENABLE |
+ R300_WR_COMP_ENABLE |
+ R300_FAST_FILL_ENABLE))
+ goto fail;
+ }
+ break;
+ case 0x4e04:
+ /* RB3D_BLENDCNTL */
+ track->blend_read_enable = !!(idx_value & (1 << 2));
+ track->cb_dirty = true;
+ break;
+ case R300_RB3D_AARESOLVE_OFFSET:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ track->aa.robj = reloc->robj;
+ track->aa.offset = idx_value;
+ track->aa_dirty = true;
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case R300_RB3D_AARESOLVE_PITCH:
+ track->aa.pitch = idx_value & 0x3FFE;
+ track->aa_dirty = true;
+ break;
+ case R300_RB3D_AARESOLVE_CTL:
+ track->aaresolve = idx_value & 0x1;
+ track->aa_dirty = true;
+ break;
+ case 0x4f30: /* ZB_MASK_OFFSET */
+ case 0x4f34: /* ZB_ZMASK_PITCH */
+ case 0x4f44: /* ZB_HIZ_OFFSET */
+ case 0x4f54: /* ZB_HIZ_PITCH */
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ break;
+ case 0x4028:
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ /* GB_Z_PEQ_CONFIG */
+ if (p->rdev->family >= CHIP_RV350)
+ break;
+ goto fail;
+ break;
+ case 0x4be8:
+ /* valid register only on RV530 */
+ if (p->rdev->family == CHIP_RV530)
+ break;
+		/* fall through - do not move */
+ default:
+ goto fail;
+ }
+ return 0;
+fail:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+ reg, idx, idx_value);
+ return -EINVAL;
+}
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ volatile uint32_t *ib;
+ unsigned idx;
+ int r;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx + 1;
+ track = (struct r100_cs_track *)p->track;
+ switch(pkt->opcode) {
+ case PACKET3_3D_LOAD_VBPNTR:
+ r = r100_packet3_load_vbpntr(p, pkt, idx);
+ if (r)
+ return r;
+ break;
+ case PACKET3_INDX_BUFFER:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ radeon_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+ if (r) {
+ return r;
+ }
+ break;
+ /* Draw packet */
+ case PACKET3_3D_DRAW_IMMD:
+		/* Number of dwords is vtx_size * (num_vertices - 1);
+		 * PRIM_WALK must be equal to 3, i.e. vertex data is
+		 * embedded in the cmd stream */
+ if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ return -EINVAL;
+ }
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ track->immd_dwords = pkt->count - 1;
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_DRAW_IMMD_2:
+		/* Number of dwords is vtx_size * (num_vertices - 1);
+		 * PRIM_WALK must be equal to 3, i.e. vertex data is
+		 * embedded in the cmd stream */
+ if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+ DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ return -EINVAL;
+ }
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ track->immd_dwords = pkt->count;
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_DRAW_VBUF:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_DRAW_VBUF_2:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_DRAW_INDX:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_DRAW_INDX_2:
+ track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+ r = r100_cs_track_check(p->rdev, track);
+ if (r) {
+ return r;
+ }
+ break;
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
+ case PACKET3_3D_CLEAR_CMASK:
+ if (p->rdev->cmask_filp != p->filp)
+ return -EINVAL;
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct r100_cs_track *track;
+ int r;
+
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r100_cs_track_clear(p->rdev, track);
+ p->track = track;
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r300.reg_safe_bm,
+ p->rdev->config.r300.reg_safe_bm_size,
+ &r300_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r300_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ return -EINVAL;
+ }
+ if (r) {
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+void r300_set_reg_safe(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
+}
+
+void r300_mc_program(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+ int r;
+
+ r = r100_debugfs_mc_info_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+ }
+
+ /* Stops all mc clients */
+ r100_mc_stop(rdev, &save);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(R_00014C_MC_AGP_LOCATION,
+ S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+ S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+ WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ WREG32(R_00015C_AGP_BASE_2,
+ upper_32_bits(rdev->mc.agp_base) & 0xff);
+ } else {
+ WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32(R_000170_AGP_BASE, 0);
+ WREG32(R_00015C_AGP_BASE_2, 0);
+ }
+ /* Wait for mc idle */
+ if (r300_mc_wait_for_idle(rdev))
+		DRM_INFO("Failed to wait for MC idle before programming MC.\n");
+	/* Program the MC; should be a 32-bit limited address space */
+ WREG32(R_000148_MC_FB_LOCATION,
+ S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ r100_mc_resume(rdev, &save);
+}
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force on some of the blocks */
+ tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+ tmp |= S_00000D_FORCE_VAP(1);
+ WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
+ r300_mc_program(rdev);
+ /* Resume clock */
+ r300_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ r300_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350 ||
+ rdev->family == CHIP_RV350)
+ r100_enable_bm(rdev);
+
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+ int r;
+
+	/* Make sure the GARTs are not working */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r300_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = r300_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disable VGA need to use VGA request */
+ /* restore some register to sane defaults */
+ r100_restore_sanity(rdev);
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for R300-family GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+ /* Set asic errata */
+ r300_errata(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r300_mc_init(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ r300_set_reg_safe(rdev);
+
+ rdev->accel_working = true;
+ r = r300_startup(rdev);
+ if (r) {
+		/* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
new file mode 100644
index 0000000..60170ea
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -0,0 +1,1186 @@
+/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc. 2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_buffer.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#include <asm/unaligned.h>
+
+#define R300_SIMULTANEOUS_CLIPRECTS 4
+
+/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+ 0xAAAA,
+ 0xEEEE,
+ 0xFEFE,
+ 0xFFFE
+};
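+
+/* Illustrative note: the table is indexed by nr - 1 in r300_emit_cliprects()
+ * below, so with two cliprects active,
+ *
+ *   OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[2 - 1]);
+ *
+ * programs R300_RE_CLIPRECT_CNTL with 0xEEEE.
+ */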
+
+/**
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, int n)
+{
+ struct drm_clip_rect box;
+ int nr;
+ int i;
+ RING_LOCALS;
+
+ nr = cmdbuf->nbox - n;
+ if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+ nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+ DRM_DEBUG("%i cliprects\n", nr);
+
+ if (nr) {
+ BEGIN_RING(6 + nr * 2);
+ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+ for (i = 0; i < nr; ++i) {
+ if (DRM_COPY_FROM_USER
+ (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+ DRM_ERROR("copy cliprect faulted\n");
+ return -EFAULT;
+ }
+
+ box.x2--; /* Hardware expects inclusive bottom-right corner */
+ box.y2--;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ box.x1 = (box.x1) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2) &
+ R300_CLIPRECT_MASK;
+ } else {
+ box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ }
+
+ OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+ (box.y1 << R300_CLIPRECT_Y_SHIFT));
+ OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+ (box.y2 << R300_CLIPRECT_Y_SHIFT));
+
+ }
+
+ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
+
+ /* TODO/SECURITY: Force scissors to a safe value, otherwise the
+ * client might be able to trample over memory.
+ * The impact should be very limited, but I'd rather be safe than
+ * sorry.
+ */
+ OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+ OUT_RING(0);
+ OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
+ ADVANCE_RING();
+ } else {
+ /* Why we allow zero cliprect rendering:
+ * There are some commands in a command buffer that must be submitted
+ * even when there are no cliprects, e.g. DMA buffer discard
+ * or state setting (though state setting could be avoided by
+ * simulating a loss of context).
+ *
+ * Now since the cmdbuf interface is so chaotic right now (and is
+ * bound to remain that way for a bit until things settle down),
+ * it is basically impossible to filter out the commands that are
+ * necessary and those that aren't.
+ *
+ * So I choose the safe way and don't do any filtering at all;
+ * instead, I simply set up the engine so that all rendering
+ * can't produce any fragments.
+ */
+ BEGIN_RING(2);
+ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
+ ADVANCE_RING();
+ }
+
+	/* flush cache and wait for idle clean after cliprect change */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ ADVANCE_RING();
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+ return 0;
+}
+
+static u8 r300_reg_flags[0x10000 >> 2];
+
+void r300_init_reg_flags(struct drm_device *dev)
+{
+ int i;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ memset(r300_reg_flags, 0, 0x10000 >> 2);
+#define ADD_RANGE_MARK(reg, count, mark) \
+	for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++) \
+		r300_reg_flags[i] |= (mark);
+
+#define MARK_SAFE 1
+#define MARK_CHECK_OFFSET 2
+
+#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
+
+ /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+ ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+ ADD_RANGE(R300_VAP_CNTL, 1);
+ ADD_RANGE(R300_SE_VTE_CNTL, 2);
+ ADD_RANGE(0x2134, 2);
+ ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
+ ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+ ADD_RANGE(0x21DC, 1);
+ ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+ ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+ ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
+ ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
+ ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+ ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+ ADD_RANGE(R300_GB_ENABLE, 1);
+ ADD_RANGE(R300_GB_MSPOS0, 5);
+ ADD_RANGE(R300_TX_INVALTAGS, 1);
+ ADD_RANGE(R300_TX_ENABLE, 1);
+ ADD_RANGE(0x4200, 4);
+ ADD_RANGE(0x4214, 1);
+ ADD_RANGE(R300_RE_POINTSIZE, 1);
+ ADD_RANGE(0x4230, 3);
+ ADD_RANGE(R300_RE_LINE_CNT, 1);
+ ADD_RANGE(R300_RE_UNK4238, 1);
+ ADD_RANGE(0x4260, 3);
+ ADD_RANGE(R300_RE_SHADE, 4);
+ ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+ ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
+ ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+ ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
+ ADD_RANGE(R300_RE_CULL_CNTL, 1);
+ ADD_RANGE(0x42C0, 2);
+ ADD_RANGE(R300_RS_CNTL_0, 2);
+
+ ADD_RANGE(R300_SU_REG_DEST, 1);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
+ ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
+
+ ADD_RANGE(R300_SC_HYPERZ, 2);
+ ADD_RANGE(0x43E8, 1);
+
+ ADD_RANGE(0x46A4, 5);
+
+ ADD_RANGE(R300_RE_FOG_STATE, 1);
+ ADD_RANGE(R300_FOG_COLOR_R, 3);
+ ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+ ADD_RANGE(0x4BD8, 1);
+ ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+ ADD_RANGE(0x4E00, 1);
+ ADD_RANGE(R300_RB3D_CBLEND, 2);
+ ADD_RANGE(R300_RB3D_COLORMASK, 1);
+ ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
+ ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
+ ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+ ADD_RANGE(0x4E50, 9);
+ ADD_RANGE(0x4E88, 1);
+ ADD_RANGE(0x4EA0, 2);
+ ADD_RANGE(R300_ZB_CNTL, 3);
+ ADD_RANGE(R300_ZB_FORMAT, 4);
+ ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
+ ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
+ ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
+ ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
+ ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
+
+ ADD_RANGE(R300_TX_FILTER_0, 16);
+ ADD_RANGE(R300_TX_FILTER1_0, 16);
+ ADD_RANGE(R300_TX_SIZE_0, 16);
+ ADD_RANGE(R300_TX_FORMAT_0, 16);
+ ADD_RANGE(R300_TX_PITCH_0, 16);
+ /* Texture offset is dangerous and needs more checking */
+ ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+ ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
+ ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+
+ /* Sporadic registers used as primitives are emitted */
+ ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
+ ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+ ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+ ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
+ ADD_RANGE(R500_US_CONFIG, 2);
+ ADD_RANGE(R500_US_CODE_ADDR, 3);
+ ADD_RANGE(R500_US_FC_CTRL, 1);
+ ADD_RANGE(R500_RS_IP_0, 16);
+ ADD_RANGE(R500_RS_INST_0, 16);
+ ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
+ ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
+ ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+ } else {
+ ADD_RANGE(R300_PFS_CNTL_0, 3);
+ ADD_RANGE(R300_PFS_NODE_0, 4);
+ ADD_RANGE(R300_PFS_TEXI_0, 64);
+ ADD_RANGE(R300_PFS_INSTR0_0, 64);
+ ADD_RANGE(R300_PFS_INSTR1_0, 64);
+ ADD_RANGE(R300_PFS_INSTR2_0, 64);
+ ADD_RANGE(R300_PFS_INSTR3_0, 64);
+ ADD_RANGE(R300_RS_INTERP_0, 8);
+ ADD_RANGE(R300_RS_ROUTE_0, 8);
+
+ }
+}
+
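+/* Returns -1 if the range falls outside the 64 KiB register space, 1 if any
+ * register in it needs more than MARK_SAFE handling (including unknown
+ * registers), and 0 if the whole range may be emitted as-is. */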
+static __inline__ int r300_check_range(unsigned reg, int count)
+{
+ int i;
+ if (reg & ~0xffff)
+ return -1;
+ for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+ if (r300_reg_flags[i] != MARK_SAFE)
+ return 1;
+ return 0;
+}
+
+static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
+ dev_priv,
+ drm_radeon_kcmd_buffer_t
+ * cmdbuf,
+ drm_r300_cmd_header_t
+ header)
+{
+ int reg;
+ int sz;
+ int i;
+ u32 *value;
+ RING_LOCALS;
+
+ sz = header.packet0.count;
+ reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+ if ((sz > 64) || (sz < 0)) {
+ DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+ reg, sz);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < sz; i++) {
+ switch (r300_reg_flags[(reg >> 2) + i]) {
+ case MARK_SAFE:
+ break;
+ case MARK_CHECK_OFFSET:
+ value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *value)) {
+ DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+ reg, sz);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Register %04x failed check as flag=%02x\n",
+ reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+ return -EINVAL;
+ }
+ }
+
+ BEGIN_RING(1 + sz);
+ OUT_RING(CP_PACKET0(reg, sz - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ int reg;
+ int sz;
+ RING_LOCALS;
+
+ sz = header.packet0.count;
+ reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+ if (!sz)
+ return 0;
+
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+ if (reg + sz * 4 >= 0x10000) {
+ DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
+ sz);
+ return -EINVAL;
+ }
+
+ if (r300_check_range(reg, sz)) {
+ /* go and check everything */
+ return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
+ header);
+ }
+ /* the rest of the data is safe to emit, whatever the values the user passed */
+
+ BEGIN_RING(1 + sz);
+ OUT_RING(CP_PACKET0(reg, sz - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ int sz;
+ int addr;
+ RING_LOCALS;
+
+ sz = header.vpu.count;
+ addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+ if (!sz)
+ return 0;
+ if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+ /* VAP is very sensitive so we purge cache before we program it
+ * and we also flush its state before & after */
+ BEGIN_RING(6);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+ BEGIN_RING(3 + sz * 4);
+ OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+ OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
+ ADVANCE_RING();
+
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ RING_LOCALS;
+
+ if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+ BEGIN_RING(10);
+ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+ OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+ (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
+ ADVANCE_RING();
+
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FLUSH);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* set flush flag */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+ return 0;
+}
+
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ u32 header)
+{
+ int count, i, k;
+#define MAX_ARRAY_PACKET 64
+ u32 *data;
+ u32 narrays;
+ RING_LOCALS;
+
+ count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ if ((count + 1) > MAX_ARRAY_PACKET) {
+ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+ count);
+ return -EINVAL;
+ }
+ /* carefully check packet contents */
+
+ /* We have already read the header so advance the buffer. */
+ drm_buffer_advance(cmdbuf->buffer, 4);
+
+ narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ k = 0;
+ i = 1;
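+	/* Arrays come in pairs that share one attribute dword; every array
+	 * pointer must pass radeon_check_offset() before the packet is
+	 * re-emitted. */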
+ while ((k < narrays) && (i < (count + 1))) {
+ i++; /* skip attribute field */
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
+ DRM_ERROR
+ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return -EINVAL;
+ }
+ k++;
+ i++;
+ if (k == narrays)
+ break;
+ /* have one more to process, they come in pairs */
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
+ DRM_ERROR
+ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return -EINVAL;
+ }
+ k++;
+ i++;
+ }
+ /* do the counts match what we expect ? */
+ if ((k != narrays) || (i != (count + 1))) {
+ DRM_ERROR
+ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+ k, i, narrays, count + 1);
+ return -EINVAL;
+ }
+
+ /* all clear, output packet */
+
+ BEGIN_RING(count + 2);
+ OUT_RING(header);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ int count, ret;
+ RING_LOCALS;
+
+
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ if (*cmd & 0x8000) {
+ u32 offset;
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
+ ret = !radeon_check_offset(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
+ return -EINVAL;
+ }
+ }
+
+ if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
+ ret = !radeon_check_offset(dev_priv, offset);
+ if (ret) {
+ DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
+ return -EINVAL;
+ }
+
+ }
+ }
+
+ BEGIN_RING(count+2);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ int count;
+ int expected_count;
+ RING_LOCALS;
+
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ expected_count = *cmd1 >> 16;
+ if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ expected_count = (expected_count+1)/2;
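+	/* 16-bit indices are packed two per dword, hence the round-up. */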
+
+ if (count && count != expected_count) {
+ DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+ count, expected_count);
+ return -EINVAL;
+ }
+
+ BEGIN_RING(count+2);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+ ADVANCE_RING();
+
+ if (!count) {
+ drm_r300_cmd_header_t stack_header, *header;
+ u32 *cmd1, *cmd2, *cmd3;
+
+ if (drm_buffer_unprocessed(cmdbuf->buffer)
+ < 4*4 + sizeof(stack_header)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+ return -EINVAL;
+ }
+
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+
+ if (header->header.cmd_type != R300_CMD_PACKET3 ||
+ header->packet3.packet != R300_CMD_PACKET3_RAW ||
+ *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+ return -EINVAL;
+ }
+
+ if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+ *cmd1);
+ return -EINVAL;
+ }
+ if (!radeon_check_offset(dev_priv, *cmd2)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+ *cmd2);
+ return -EINVAL;
+ }
+ if (*cmd3 != expected_count) {
+ DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+ *cmd3, expected_count);
+ return -EINVAL;
+ }
+
+ BEGIN_RING(4);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
+ ADVANCE_RING();
+ }
+
+ return 0;
+}
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ u32 *header;
+ int count;
+ RING_LOCALS;
+
+ if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+	/* FIXME: this simply emits a packet without much checking.
+	   We need to be smarter. */
+
+ /* obtain first word - actual packet3 header */
+ header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+
+ /* Is it packet 3 ? */
+ if ((*header >> 30) != 0x3) {
+ DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
+ return -EINVAL;
+ }
+
+ count = (*header >> 16) & 0x3fff;
+
+ /* Check again now that we know how much data to expect */
+ if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
+ DRM_ERROR
+ ("Expected packet3 of length %d but have only %d bytes left\n",
+ (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
+ return -EINVAL;
+ }
+
+ /* Is it a packet type we know about ? */
+ switch (*header & 0xff00) {
+ case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
+ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
+
+ case RADEON_CNTL_BITBLT_MULTI:
+ return r300_emit_bitblt_multi(dev_priv, cmdbuf);
+
+ case RADEON_CP_INDX_BUFFER:
+ DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+ return -EINVAL;
+ case RADEON_CP_3D_DRAW_IMMD_2:
+ /* triggers drawing using in-packet vertex data */
+ case RADEON_CP_3D_DRAW_VBUF_2:
+ /* triggers drawing of vertex buffers setup elsewhere */
+ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+ RADEON_PURGE_EMITED);
+ break;
+ case RADEON_CP_3D_DRAW_INDX_2:
+ /* triggers drawing using indices to vertex buffer */
+ /* whenever we send vertex we clear flush & purge */
+ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+ RADEON_PURGE_EMITED);
+ return r300_emit_draw_indx_2(dev_priv, cmdbuf);
+ case RADEON_WAIT_FOR_IDLE:
+ case RADEON_CP_NOP:
+ /* these packets are safe */
+ break;
+ default:
+ DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
+ return -EINVAL;
+ }
+
+ BEGIN_RING(count + 2);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ int n;
+ int ret;
+ int orig_iter = cmdbuf->buffer->iterator;
+
+ /* This is a do-while-loop so that we run the interior at least once,
+ * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
+ */
+ n = 0;
+ do {
+ if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+ ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+ if (ret)
+ return ret;
+
+ cmdbuf->buffer->iterator = orig_iter;
+ }
+
+ switch (header.packet3.packet) {
+ case R300_CMD_PACKET3_CLEAR:
+ DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+ ret = r300_emit_clear(dev_priv, cmdbuf);
+ if (ret) {
+ DRM_ERROR("r300_emit_clear failed\n");
+ return ret;
+ }
+ break;
+
+ case R300_CMD_PACKET3_RAW:
+ DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+ ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+ if (ret) {
+ DRM_ERROR("r300_emit_raw_packet3 failed\n");
+ return ret;
+ }
+ break;
+
+ default:
+ DRM_ERROR("bad packet3 type %i at byte %d\n",
+ header.packet3.packet,
+ cmdbuf->buffer->iterator - (int)sizeof(header));
+ return -EINVAL;
+ }
+
+ n += R300_SIMULTANEOUS_CLIPRECTS;
+ } while (n < cmdbuf->nbox);
+
+ return 0;
+}
+
+/* Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/**
+ * Emit the sequence to pacify R300.
+ */
+static void r300_pacify(drm_radeon_private_t *dev_priv)
+{
+ uint32_t cache_z, cache_3d, cache_2d;
+ RING_LOCALS;
+
+ cache_z = R300_ZC_FLUSH;
+ cache_2d = R300_RB2D_DC_FLUSH;
+ cache_3d = R300_RB3D_DC_FLUSH;
+ if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge; primitives were drawn since the last purge */
+ cache_z |= R300_ZC_FREE;
+ cache_2d |= R300_RB2D_DC_FREE;
+ cache_3d |= R300_RB3D_DC_FREE;
+ }
+
+ /* flush & purge zbuffer */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
+ OUT_RING(cache_z);
+ ADVANCE_RING();
+ /* flush & purge 3d */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(cache_3d);
+ ADVANCE_RING();
+ /* flush & purge texture */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ /* FIXME: is this one really needed ? */
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+ OUT_RING(0);
+ ADVANCE_RING();
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+ ADVANCE_RING();
+ /* flush & purge 2d through E2 as RB2D will trigger lockup */
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(cache_2d);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_HOST_IDLECLEAN);
+ ADVANCE_RING();
+ /* set flush & purge flags */
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/**
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+ buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+ buf->pending = 1;
+ buf->used = 0;
+}
+
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+ drm_r300_cmd_header_t header)
+{
+ u32 wait_until;
+ RING_LOCALS;
+
+ if (!header.wait.flags)
+ return;
+
+ wait_until = 0;
+
+ switch(header.wait.flags) {
+ case R300_WAIT_2D:
+ wait_until = RADEON_WAIT_2D_IDLE;
+ break;
+ case R300_WAIT_3D:
+ wait_until = RADEON_WAIT_3D_IDLE;
+ break;
+ case R300_NEW_WAIT_2D_3D:
+ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+ break;
+ case R300_NEW_WAIT_2D_2D_CLEAN:
+ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+ break;
+ case R300_NEW_WAIT_3D_3D_CLEAN:
+ wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+ break;
+ case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+ wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+ break;
+ default:
+ return;
+ }
+
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ OUT_RING(wait_until);
+ ADVANCE_RING();
+}
+
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ u32 *ref_age_base;
+ u32 i, *buf_idx, h_pending;
+ u64 *ptr_addr;
+ u64 stack_ptr_addr;
+ RING_LOCALS;
+
+ if (drm_buffer_unprocessed(cmdbuf->buffer) <
+ (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
+ return -EINVAL;
+ }
+
+ if (header.scratch.reg >= 5) {
+ return -EINVAL;
+ }
+
+ dev_priv->scratch_ages[header.scratch.reg]++;
+
+ ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_ptr_addr), &stack_ptr_addr);
+ ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
+
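+	/* Each slot in the user-supplied table is a pair of u32s (age,
+	 * pending count), hence the *= 2 scaling of the buffer index below. */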
+ for (i=0; i < header.scratch.n_bufs; i++) {
+ buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ *buf_idx *= 2; /* 8 bytes per buf */
+
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ &dev_priv->scratch_ages[header.scratch.reg],
+ sizeof(u32)))
+ return -EINVAL;
+
+ if (DRM_COPY_FROM_USER(&h_pending,
+ ref_age_base + *buf_idx + 1,
+ sizeof(u32)))
+ return -EINVAL;
+
+ if (h_pending == 0)
+ return -EINVAL;
+
+ h_pending--;
+
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ &h_pending,
+ sizeof(u32)))
+ return -EINVAL;
+
+ drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
+ }
+
+ BEGIN_RING(2);
+ OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
+ OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Uploads user-supplied r500 fragment program instructions or constants onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ int sz;
+ int addr;
+ int type;
+ int isclamp;
+ int stride;
+ RING_LOCALS;
+
+ sz = header.r500fp.count;
+ /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
+ addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+
+ type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+ isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+
+ addr |= (type << 16);
+ addr |= (isclamp << 17);
+
+ stride = type ? 4 : 6;
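+	/* upload stride in dwords per vector: constants take 4, instructions 6 */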
+
+ DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
+ if (!sz)
+ return 0;
+ if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+ BEGIN_RING(3 + sz * stride);
+ OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
+ OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
+
+ ADVANCE_RING();
+
+ return 0;
+}
+
+
+/**
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+int r300_do_cp_cmdbuf(struct drm_device *dev,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
+ int emit_dispatch_age = 0;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ /* pacify */
+ r300_pacify(dev_priv);
+
+ if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+ ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+ if (ret)
+ goto cleanup;
+ }
+
+ while (drm_buffer_unprocessed(cmdbuf->buffer)
+ >= sizeof(drm_r300_cmd_header_t)) {
+ int idx;
+ drm_r300_cmd_header_t *header, stack_header;
+
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
+
+ switch (header->header.cmd_type) {
+ case R300_CMD_PACKET0:
+ DRM_DEBUG("R300_CMD_PACKET0\n");
+ ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
+ if (ret) {
+ DRM_ERROR("r300_emit_packet0 failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_VPU:
+ DRM_DEBUG("R300_CMD_VPU\n");
+ ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
+ if (ret) {
+ DRM_ERROR("r300_emit_vpu failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_PACKET3:
+ DRM_DEBUG("R300_CMD_PACKET3\n");
+ ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
+ if (ret) {
+ DRM_ERROR("r300_emit_packet3 failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_END3D:
+ DRM_DEBUG("R300_CMD_END3D\n");
+			/* TODO:
+			   Ideally the userspace driver should not need to issue this call,
+			   i.e. the drm driver should issue it automatically and prevent
+			   lockups.
+
+			   In practice, we do not understand why this call is needed and what
+			   it does (except for some vague guesses that it has to do with cache
+			   coherence), and so the userspace driver issues it.
+
+			   Once we are sure which uses prevent lockups, the code could be moved
+			   into the kernel and the userspace driver would no longer
+			   need to use this command.
+
+			   Note that issuing this command does not hurt anything
+			   except, possibly, performance */
+ r300_pacify(dev_priv);
+ break;
+
+ case R300_CMD_CP_DELAY:
+ /* simple enough, we can do it here */
+ DRM_DEBUG("R300_CMD_CP_DELAY\n");
+ {
+ int i;
+ RING_LOCALS;
+
+ BEGIN_RING(header->delay.count);
+ for (i = 0; i < header->delay.count; i++)
+ OUT_RING(RADEON_CP_PACKET2);
+ ADVANCE_RING();
+ }
+ break;
+
+ case R300_CMD_DMA_DISCARD:
+ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+ idx = header->dma.buf_idx;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ buf = dma->buflist[idx];
+ if (buf->file_priv != file_priv || buf->pending) {
+ DRM_ERROR("bad buffer %p %p %d\n",
+ buf->file_priv, file_priv,
+ buf->pending);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ emit_dispatch_age = 1;
+ r300_discard_buffer(dev, file_priv->master, buf);
+ break;
+
+ case R300_CMD_WAIT:
+ DRM_DEBUG("R300_CMD_WAIT\n");
+ r300_cmd_wait(dev_priv, *header);
+ break;
+
+ case R300_CMD_SCRATCH:
+ DRM_DEBUG("R300_CMD_SCRATCH\n");
+ ret = r300_scratch(dev_priv, cmdbuf, *header);
+ if (ret) {
+ DRM_ERROR("r300_scratch failed\n");
+ goto cleanup;
+ }
+ break;
+
+ case R300_CMD_R500FP:
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+ DRM_ERROR("Calling r500 command on r300 card\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ DRM_DEBUG("R300_CMD_R500FP\n");
+ ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
+ if (ret) {
+ DRM_ERROR("r300_emit_r500fp failed\n");
+ goto cleanup;
+ }
+ break;
+ default:
+ DRM_ERROR("bad cmd_type %i at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator - (int)sizeof(*header));
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ DRM_DEBUG("END\n");
+
+ cleanup:
+ r300_pacify(dev_priv);
+
+ /* We emit the vertex buffer age here, outside the pacifier "brackets"
+ * for two reasons:
+ * (1) This may coalesce multiple age emissions into a single one and
+ * (2) more importantly, some chips lock up hard when scratch registers
+ * are written inside the pacifier bracket.
+ */
+ if (emit_dispatch_age) {
+ RING_LOCALS;
+
+ /* Emit the vertex buffer age */
+ BEGIN_RING(2);
+ RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
+ ADVANCE_RING();
+ }
+
+ COMMIT_RING();
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
new file mode 100644
index 0000000..00c0d2b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -0,0 +1,1789 @@
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ * Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
+
+#define R300_SURF_TILE_MACRO (1<<16)
+#define R300_SURF_TILE_MICRO (2<<16)
+#define R300_SURF_TILE_BOTH (3<<16)
+
+
+#define R300_MC_INIT_MISC_LAT_TIMER 0x180
+# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
+# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
+# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
+# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
+# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
+# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
+# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
+# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
+
+#define R300_MC_INIT_GFX_LAT_TIMER 0x154
+# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
+# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
+# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
+# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
+# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
+# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
+# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
+# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE 0x1D98
+#define R300_SE_VPORT_XOFFSET 0x1D9C
+#define R300_SE_VPORT_YSCALE 0x1DA0
+#define R300_SE_VPORT_YOFFSET 0x1DA4
+#define R300_SE_VPORT_ZSCALE 0x1DA8
+#define R300_SE_VPORT_ZOFFSET 0x1DAC
+
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code from Christoph Brill (It's a guess!)
+ */
+#define R300_VAP_CNTL 0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL 0x2084
+# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
+# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
+# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
+# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
+
+# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
+ /* State based - direct writes to registers trigger vertex
+ generation */
+# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
+
+	/* I don't think I saw these three used. */
+# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
+# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
+# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
+
+ /* index size - when not set the indices are assumed to be 16 bit */
+# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
+ /* number of vertices */
+# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
+# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
+ /* each of the following is 3 bits wide, specifies number
+ of components */
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END: Wild guesses */
+
+#define R300_SE_VTE_CNTL 0x20b0
+# define R300_VPORT_X_SCALE_ENA 0x00000001
+# define R300_VPORT_X_OFFSET_ENA 0x00000002
+# define R300_VPORT_Y_SCALE_ENA 0x00000004
+# define R300_VPORT_Y_OFFSET_ENA 0x00000008
+# define R300_VPORT_Z_SCALE_ENA 0x00000010
+# define R300_VPORT_Z_OFFSET_ENA 0x00000020
+# define R300_VTX_XY_FMT 0x00000100
+# define R300_VTX_Z_FMT 0x00000200
+# define R300_VTX_W0_FMT 0x00000400
+# define R300_VTX_W0_NORMALIZE 0x00000800
+# define R300_VTX_ST_DENORMALIZED 0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS 0x2140
+# define R300_VC_NO_SWAP (0 << 0)
+# define R300_VC_16BIT_SWAP (1 << 0)
+# define R300_VC_32BIT_SWAP (2 << 0)
+# define R300_VAP_TCL_BYPASS (1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate-mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0 0x2150
+# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
+# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
+# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
+# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
+# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
+# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
+# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
+# define R300_VAP_INPUT_ROUTE_END (1 << 13)
+# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1 0x2154
+#define R300_VAP_INPUT_ROUTE_0_2 0x2158
+#define R300_VAP_INPUT_ROUTE_0_3 0x215C
+#define R300_VAP_INPUT_ROUTE_0_4 0x2160
+#define R300_VAP_INPUT_ROUTE_0_5 0x2164
+#define R300_VAP_INPUT_ROUTE_0_6 0x2168
+#define R300_VAP_INPUT_ROUTE_0_7 0x216C
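+/* A plausible route word (a sketch built from the guessed bits above):
+ * route the first input as four floats into VAP input register 2 with
+ * R300_INPUT_ROUTE_COMPONENTS_4 | (2 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |
+ * R300_INPUT_ROUTE_FLOAT, setting R300_VAP_INPUT_ROUTE_END on the last word.
+ */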
+
+/* gap */
+
+/* Notes:
+ * - always set up to produce at least two attributes:
+ * if vertex program uses only position, fglrx will set normal, too
+ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_INPUT_CNTL_0 0x2180
+# define R300_INPUT_CNTL_0_COLOR 0x00000001
+#define R300_VAP_INPUT_CNTL_1 0x2184
+# define R300_INPUT_CNTL_POS 0x00000001
+# define R300_INPUT_CNTL_NORMAL 0x00000002
+# define R300_INPUT_CNTL_COLOR 0x00000004
+# define R300_INPUT_CNTL_TC0 0x00000400
+# define R300_INPUT_CNTL_TC1 0x00000800
+# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
+# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
+# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
+# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
+# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
+# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; all words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set the zw components of texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
+# define R300_INPUT_ROUTE_SELECT_X 0
+# define R300_INPUT_ROUTE_SELECT_Y 1
+# define R300_INPUT_ROUTE_SELECT_Z 2
+# define R300_INPUT_ROUTE_SELECT_W 3
+# define R300_INPUT_ROUTE_SELECT_ZERO 4
+# define R300_INPUT_ROUTE_SELECT_ONE 5
+# define R300_INPUT_ROUTE_SELECT_MASK 7
+# define R300_INPUT_ROUTE_X_SHIFT 0
+# define R300_INPUT_ROUTE_Y_SHIFT 3
+# define R300_INPUT_ROUTE_Z_SHIFT 6
+# define R300_INPUT_ROUTE_W_SHIFT 9
+# define R300_INPUT_ROUTE_ENABLE (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
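+/* Example: the identity xyzw swizzle is
+ * (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT) |
+ * (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT) |
+ * R300_INPUT_ROUTE_ENABLE
+ */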
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
+# define R300_PVS_UPLOAD_PROGRAM 0x00000000
+# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
+# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA 0x2208
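+/* The upload sequence (cf. r300_emit_vpu() in r300_cmdbuf.c): write the target
+ * address to UPLOAD_ADDRESS, then stream multiples of 4 dwords into
+ * UPLOAD_DATA. */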
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C 0x221C
+# define R300_221C_NORMAL 0x00000000
+# define R300_221C_CLEAR 0x0001C000
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0 0x2220
+#define R300_VAP_CLIP_X_1 0x2224
+#define R300_VAP_CLIP_Y_0 0x2228
+#define R300_VAP_CLIP_Y_1 0x2230
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288 0x2288
+# define R300_2288_R300 0x00750000 /* -- nh */
+# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
+ * position takes place.
+ *
+ * Most likely this is used to ignore the rest of the program in cases
+ * where a group of vertices isn't visible. For some reason this "section"
+ * sometimes accepts other instructions that have no relationship with
+ * position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1 0x22D0
+# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
+# define R300_PVS_CNTL_1_POS_END_SHIFT 10
+# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2 0x22D4
+# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
+#define R300_VAP_PVS_CNTL_3 0x22D8
+# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices
+ */
+#define R300_VAP_VTX_COLOR_R 0x2464
+#define R300_VAP_VTX_COLOR_G 0x2468
+#define R300_VAP_VTX_COLOR_B 0x246C
+#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1 0x2494
+#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2 0x24A4
+#define R300_VAP_VTX_POS_0_Z_2 0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT 0x24AC
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
+# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
+ /* each of the following is 3 bits wide, specifies number
+ of components */
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+
+/* UNK30 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal at the time being due to shortcomings on the
+ * pixel shader side
+ */
+#define R300_GB_ENABLE 0x4008
+# define R300_GB_POINT_STUFF_ENABLE (1<<0)
+# define R300_GB_LINE_STUFF_ENABLE (1<<1)
+# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
+# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
+# define R300_GB_UNK31 (1<<31)
+ /* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE 0
+#define R300_GB_TEX_ST 1
+#define R300_GB_TEX_STR 2
+# define R300_GB_TEX0_SOURCE_SHIFT 16
+# define R300_GB_TEX1_SOURCE_SHIFT 18
+# define R300_GB_TEX2_SOURCE_SHIFT 20
+# define R300_GB_TEX3_SOURCE_SHIFT 22
+# define R300_GB_TEX4_SOURCE_SHIFT 24
+# define R300_GB_TEX5_SOURCE_SHIFT 26
+# define R300_GB_TEX6_SOURCE_SHIFT 28
+# define R300_GB_TEX7_SOURCE_SHIFT 30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0 0x4010
+ /* shifts - each of the fields is 4 bits */
+# define R300_GB_MSPOS0__MS_X0_SHIFT 0
+# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
+# define R300_GB_MSPOS0__MS_X1_SHIFT 8
+# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
+# define R300_GB_MSPOS0__MS_X2_SHIFT 16
+# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
+# define R300_GB_MSPOS0__MSBD0_Y 24
+# define R300_GB_MSPOS0__MSBD0_X 28
+
+#define R300_GB_MSPOS1 0x4014
+# define R300_GB_MSPOS1__MS_X3_SHIFT 0
+# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
+# define R300_GB_MSPOS1__MS_X4_SHIFT 8
+# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
+# define R300_GB_MSPOS1__MS_X5_SHIFT 16
+# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
+# define R300_GB_MSPOS1__MSBD1 24
+
+
+#define R300_GB_TILE_CONFIG 0x4018
+# define R300_GB_TILE_ENABLE (1<<0)
+# define R300_GB_TILE_PIPE_COUNT_RV300 0
+# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
+# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
+# define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1)
+# define R300_GB_TILE_SIZE_8 0
+# define R300_GB_TILE_SIZE_16 (1<<4)
+# define R300_GB_TILE_SIZE_32 (2<<4)
+# define R300_GB_SUPER_SIZE_1 (0<<6)
+# define R300_GB_SUPER_SIZE_2 (1<<6)
+# define R300_GB_SUPER_SIZE_4 (2<<6)
+# define R300_GB_SUPER_SIZE_8 (3<<6)
+# define R300_GB_SUPER_SIZE_16 (4<<6)
+# define R300_GB_SUPER_SIZE_32 (5<<6)
+# define R300_GB_SUPER_SIZE_64 (6<<6)
+# define R300_GB_SUPER_SIZE_128 (7<<6)
+# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
+# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
+# define R300_GB_SUPER_TILE_A 0
+# define R300_GB_SUPER_TILE_B (1<<15)
+# define R300_GB_SUBPIXEL_1_12 0
+# define R300_GB_SUBPIXEL_1_16 (1<<16)
+
+#define R300_GB_FIFO_SIZE 0x4024
+ /* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32 0
+#define R300_GB_FIFO_SIZE_64 1
+#define R300_GB_FIFO_SIZE_128 2
+#define R300_GB_FIFO_SIZE_256 3
+# define R300_SC_IFIFO_SIZE_SHIFT 0
+# define R300_SC_TZFIFO_SIZE_SHIFT 2
+# define R300_SC_BFIFO_SIZE_SHIFT 4
+
+# define R300_US_OFIFO_SIZE_SHIFT 12
+# define R300_US_WFIFO_SIZE_SHIFT 14
+	/* the following use the same constants as above, but the meaning
+	   is times 2 (i.e. instead of 32 words it means 64) */
+# define R300_RS_TFIFO_SIZE_SHIFT 6
+# define R300_RS_CFIFO_SIZE_SHIFT 8
+# define R300_US_RAM_SIZE_SHIFT 10
+ /* watermarks, 3 bits wide */
+# define R300_RS_HIGHWATER_COL_SHIFT 16
+# define R300_RS_HIGHWATER_TEX_SHIFT 19
+# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
+# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
+
+#define R300_GB_SELECT 0x401C
+# define R300_GB_FOG_SELECT_C0A 0
+# define R300_GB_FOG_SELECT_C1A 1
+# define R300_GB_FOG_SELECT_C2A 2
+# define R300_GB_FOG_SELECT_C3A 3
+# define R300_GB_FOG_SELECT_1_1_W 4
+# define R300_GB_FOG_SELECT_Z 5
+# define R300_GB_DEPTH_SELECT_Z 0
+# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
+# define R300_GB_W_SELECT_1_W 0
+# define R300_GB_W_SELECT_1 (1<<4)
+
+#define R300_GB_AA_CONFIG 0x4020
+# define R300_AA_DISABLE 0x00
+# define R300_AA_ENABLE 0x01
+# define R300_AA_SUBSAMPLES_2 0
+# define R300_AA_SUBSAMPLES_3 (1<<1)
+# define R300_AA_SUBSAMPLES_4 (2<<1)
+# define R300_AA_SUBSAMPLES_6 (3<<1)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_INVALTAGS 0x4100
+#define R300_TX_FLUSH 0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE 0x4104
+# define R300_TX_ENABLE_0 (1 << 0)
+# define R300_TX_ENABLE_1 (1 << 1)
+# define R300_TX_ENABLE_2 (1 << 2)
+# define R300_TX_ENABLE_3 (1 << 3)
+# define R300_TX_ENABLE_4 (1 << 4)
+# define R300_TX_ENABLE_5 (1 << 5)
+# define R300_TX_ENABLE_6 (1 << 6)
+# define R300_TX_ENABLE_7 (1 << 7)
+# define R300_TX_ENABLE_8 (1 << 8)
+# define R300_TX_ENABLE_9 (1 << 9)
+# define R300_TX_ENABLE_10 (1 << 10)
+# define R300_TX_ENABLE_11 (1 << 11)
+# define R300_TX_ENABLE_12 (1 << 12)
+# define R300_TX_ENABLE_13 (1 << 13)
+# define R300_TX_ENABLE_14 (1 << 14)
+# define R300_TX_ENABLE_15 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE 0x421C
+# define R300_POINTSIZE_Y_SHIFT 0
+# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
+# define R300_POINTSIZE_X_SHIFT 16
+# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
+# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
+
+/* The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT 0x4234
+# define R300_LINESIZE_SHIFT 0
+# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
+# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
+# define R300_LINE_CNT_HO (1 << 16)
+# define R300_LINE_CNT_VE (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238 0x4238
+
+/* Something shade related */
+#define R300_RE_SHADE 0x4274
+
+#define R300_RE_SHADE_MODEL 0x4278
+# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
+# define R300_RE_SHADE_MODEL_FLAT 0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE 0x4288
+# define R300_PM_ENABLED (1 << 0)
+# define R300_PM_FRONT_POINT (0 << 0)
+# define R300_PM_BACK_POINT (0 << 0)
+# define R300_PM_FRONT_LINE (1 << 4)
+# define R300_PM_FRONT_FILL (1 << 5)
+# define R300_PM_BACK_LINE (1 << 7)
+# define R300_PM_BACK_FILL (1 << 8)
+
+/* Fog parameters */
+#define R300_RE_FOG_SCALE 0x4294
+#define R300_RE_FOG_START 0x4298
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */
+#define R300_RE_ZBIAS_T_FACTOR 0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
+#define R300_RE_ZBIAS_W_FACTOR 0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT).
+ * One to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL 0x42B4
+# define R300_OCCLUSION_ON (1<<1)
+
+#define R300_RE_CULL_CNTL 0x42B8
+# define R300_CULL_FRONT (1 << 0)
+# define R300_CULL_BACK (1 << 1)
+# define R300_FRONT_FACE_CCW (0 << 2)
+# define R300_FRONT_FACE_CW (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_CNTL_0 0x4300
+# define R300_RS_CNTL_TC_CNT_SHIFT 2
+# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
+ /* number of color interpolators used */
+# define R300_RS_CNTL_CI_CNT_SHIFT 7
+# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
+ /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+ register. */
+#define R300_RS_CNTL_1 0x4304
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0 0x4310
+#define R300_RS_INTERP_1 0x4314
+# define R300_RS_INTERP_1_UNKNOWN 0x40
+#define R300_RS_INTERP_2 0x4318
+# define R300_RS_INTERP_2_UNKNOWN 0x80
+#define R300_RS_INTERP_3 0x431C
+# define R300_RS_INTERP_3_UNKNOWN 0xC0
+#define R300_RS_INTERP_4 0x4320
+#define R300_RS_INTERP_5 0x4324
+#define R300_RS_INTERP_6 0x4328
+#define R300_RS_INTERP_7 0x432C
+# define R300_RS_INTERP_SRC_SHIFT 2
+# define R300_RS_INTERP_SRC_MASK (7 << 2)
+# define R300_RS_INTERP_USED 0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0 0x4330
+#define R300_RS_ROUTE_1 0x4334
+#define R300_RS_ROUTE_2 0x4338
+#define R300_RS_ROUTE_3 0x433C /* GUESS */
+#define R300_RS_ROUTE_4 0x4340 /* GUESS */
+#define R300_RS_ROUTE_5 0x4344 /* GUESS */
+#define R300_RS_ROUTE_6 0x4348 /* GUESS */
+#define R300_RS_ROUTE_7 0x434C /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_0 0
+# define R300_RS_ROUTE_SOURCE_INTERP_1 1
+# define R300_RS_ROUTE_SOURCE_INTERP_2 2
+# define R300_RS_ROUTE_SOURCE_INTERP_3 3
+# define R300_RS_ROUTE_SOURCE_INTERP_4 4
+# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
+# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
+# define R300_RS_ROUTE_DEST_SHIFT 6
+# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
+# define R300_RS_ROUTE_0_COLOR (1 << 14)
+# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
+# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
+# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
+# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ 0x43a4
+# define R300_SC_HYPERZ_DISABLE (0 << 0)
+# define R300_SC_HYPERZ_ENABLE (1 << 0)
+# define R300_SC_HYPERZ_MIN (0 << 1)
+# define R300_SC_HYPERZ_MAX (1 << 1)
+# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
+# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
+# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
+# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
+# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
+# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
+# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
+# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
+# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
+# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
+
+#define R300_SC_EDGERULE 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0 0x43B0
+#define R300_RE_CLIPRECT_BR_0 0x43B4
+#define R300_RE_CLIPRECT_TL_1 0x43B8
+#define R300_RE_CLIPRECT_BR_1 0x43BC
+#define R300_RE_CLIPRECT_TL_2 0x43C0
+#define R300_RE_CLIPRECT_BR_2 0x43C4
+#define R300_RE_CLIPRECT_TL_3 0x43C8
+#define R300_RE_CLIPRECT_BR_3 0x43CC
+# define R300_CLIPRECT_OFFSET 1440
+# define R300_CLIPRECT_MASK 0x1FFF
+# define R300_CLIPRECT_X_SHIFT 0
+# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
+# define R300_CLIPRECT_Y_SHIFT 13
+# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL 0x43D0
+# define R300_CLIP_OUT (1 << 0)
+# define R300_CLIP_0 (1 << 1)
+# define R300_CLIP_1 (1 << 2)
+# define R300_CLIP_10 (1 << 3)
+# define R300_CLIP_2 (1 << 4)
+# define R300_CLIP_20 (1 << 5)
+# define R300_CLIP_21 (1 << 6)
+# define R300_CLIP_210 (1 << 7)
+# define R300_CLIP_3 (1 << 8)
+# define R300_CLIP_30 (1 << 9)
+# define R300_CLIP_31 (1 << 10)
+# define R300_CLIP_310 (1 << 11)
+# define R300_CLIP_32 (1 << 12)
+# define R300_CLIP_320 (1 << 13)
+# define R300_CLIP_321 (1 << 14)
+# define R300_CLIP_3210 (1 << 15)
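+/* Example: setting only R300_CLIP_0 rasterizes exactly the pixels whose
+ * assigned number is 1, i.e. pixels inside cliprect 0 and no other; see
+ * r300_emit_cliprects() in r300_cmdbuf.c. */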
+
+/* gap */
+
+#define R300_RE_SCISSORS_TL 0x43E0
+#define R300_RE_SCISSORS_BR 0x43E4
+# define R300_SCISSORS_OFFSET 1440
+# define R300_SCISSORS_X_SHIFT 0
+# define R300_SCISSORS_X_MASK (0x1FFF << 0)
+# define R300_SCISSORS_Y_SHIFT 13
+# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
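+/* A framebuffer pixel (x, y) is thus presumably encoded as
+ * ((y + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT) |
+ * ((x + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT)
+ */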
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
+#define R300_TX_FILTER_0 0x4400
+# define R300_TX_REPEAT 0
+# define R300_TX_MIRRORED 1
+# define R300_TX_CLAMP 4
+# define R300_TX_CLAMP_TO_EDGE 2
+# define R300_TX_CLAMP_TO_BORDER 6
+# define R300_TX_WRAP_S_SHIFT 0
+# define R300_TX_WRAP_S_MASK (7 << 0)
+# define R300_TX_WRAP_T_SHIFT 3
+# define R300_TX_WRAP_T_MASK (7 << 3)
+# define R300_TX_WRAP_Q_SHIFT 6
+# define R300_TX_WRAP_Q_MASK (7 << 6)
+# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
+# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
+# define R300_TX_MAG_FILTER_MASK (3 << 9)
+# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
+# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
+# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
+# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
+# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
+# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+ * anisotropy modes because that would clobber the selected mag filter.
+ */
+# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)
+# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
+# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
+# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
+# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
+# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
+# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
+# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
+# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+# define R300_TX_MAX_ANISO_MASK (14 << 21)
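+
+/* Usage sketch (GUESS, untested): a standard trilinear, repeat-wrapped
+ * sampler for texture unit 0 would combine to
+ *
+ *   filter = (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT)
+ *          | (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT)
+ *          | R300_TX_MAG_FILTER_LINEAR
+ *          | R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR;
+ */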
+
+#define R300_TX_FILTER1_0 0x4440
+# define R300_CHROMA_KEY_MODE_DISABLE 0
+# define R300_CHROMA_KEY_FORCE 1
+# define R300_CHROMA_KEY_BLEND 2
+# define R300_MC_ROUND_NORMAL (0<<2)
+# define R300_MC_ROUND_MPEG4 (1<<2)
+# define R300_LOD_BIAS_MASK 0x1fff
+# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
+# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
+# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
+# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
+# define R300_TX_TRI_PERF_0_8 (0<<15)
+# define R300_TX_TRI_PERF_1_8 (1<<15)
+# define R300_TX_TRI_PERF_1_4 (2<<15)
+# define R300_TX_TRI_PERF_3_8 (3<<15)
+# define R300_ANISO_THRESHOLD_MASK (7<<17)
+
+#define R300_TX_SIZE_0 0x4480
+# define R300_TX_WIDTHMASK_SHIFT 0
+# define R300_TX_WIDTHMASK_MASK (2047 << 0)
+# define R300_TX_HEIGHTMASK_SHIFT 11
+# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
+# define R300_TX_UNK23 (1 << 23)
+# define R300_TX_MAX_MIP_LEVEL_SHIFT 26
+# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26)
+# define R300_TX_SIZE_PROJECTED (1<<30)
+# define R300_TX_SIZE_TXPITCH_EN (1<<31)
+#define R300_TX_FORMAT_0 0x44C0
+ /* The interpretation of the format word by Wladimir van der Laan */
+ /* The X, Y, Z and W refer to the layout of the components.
+ They are given meanings as R, G, B and Alpha by the swizzle
+ specification */
+# define R300_TX_FORMAT_X8 0x0
+# define R300_TX_FORMAT_X16 0x1
+# define R300_TX_FORMAT_Y4X4 0x2
+# define R300_TX_FORMAT_Y8X8 0x3
+# define R300_TX_FORMAT_Y16X16 0x4
+# define R300_TX_FORMAT_Z3Y3X2 0x5
+# define R300_TX_FORMAT_Z5Y6X5 0x6
+# define R300_TX_FORMAT_Z6Y5X5 0x7
+# define R300_TX_FORMAT_Z11Y11X10 0x8
+# define R300_TX_FORMAT_Z10Y11X11 0x9
+# define R300_TX_FORMAT_W4Z4Y4X4 0xA
+# define R300_TX_FORMAT_W1Z5Y5X5 0xB
+# define R300_TX_FORMAT_W8Z8Y8X8 0xC
+# define R300_TX_FORMAT_W2Z10Y10X10 0xD
+# define R300_TX_FORMAT_W16Z16Y16X16 0xE
+# define R300_TX_FORMAT_DXT1 0xF
+# define R300_TX_FORMAT_DXT3 0x10
+# define R300_TX_FORMAT_DXT5 0x11
+# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
+# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
+# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
+# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
+ /* 0x16 - some 16 bit green format.. ?? */
+# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
+# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
+
+ /* gap */
+ /* Floating point formats */
+ /* Note - hardware supports both 16 and 32 bit floating point */
+# define R300_TX_FORMAT_FL_I16 0x18
+# define R300_TX_FORMAT_FL_I16A16 0x19
+# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
+# define R300_TX_FORMAT_FL_I32 0x1B
+# define R300_TX_FORMAT_FL_I32A32 0x1C
+# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
+# define R300_TX_FORMAT_ATI2N 0x1F
+ /* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+# define R300_TX_FORMAT_ALPHA_1CH 0x000
+# define R300_TX_FORMAT_ALPHA_2CH 0x200
+# define R300_TX_FORMAT_ALPHA_4CH 0x600
+# define R300_TX_FORMAT_ALPHA_NONE 0xA00
+ /* Swizzling */
+ /* constants */
+# define R300_TX_FORMAT_X 0
+# define R300_TX_FORMAT_Y 1
+# define R300_TX_FORMAT_Z 2
+# define R300_TX_FORMAT_W 3
+# define R300_TX_FORMAT_ZERO 4
+# define R300_TX_FORMAT_ONE 5
+ /* 2.0*Z, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_Z 6
+ /* 2.0*W, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_W 7
+
+# define R300_TX_FORMAT_B_SHIFT 18
+# define R300_TX_FORMAT_G_SHIFT 15
+# define R300_TX_FORMAT_R_SHIFT 12
+# define R300_TX_FORMAT_A_SHIFT 9
+ /* Convenience macro to take care of layout and swizzling */
+# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \
+ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
+ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
+ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
+ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
+ | (R300_TX_FORMAT_##FMT) \
+ )
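+	/* Expansion example: R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8)
+	   selects component X for B, Y for G, Z for R and W for A on a
+	   W8Z8Y8X8 layout, i.e.
+	   (0 << 18) | (1 << 15) | (2 << 12) | (3 << 9) | 0xC == 0xA60C */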
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do; perhaps they take values
+	   from a constant color? */
+# define R300_TX_FORMAT_CONST_X (1<<5)
+# define R300_TX_FORMAT_CONST_Y (2<<5)
+# define R300_TX_FORMAT_CONST_Z (4<<5)
+# define R300_TX_FORMAT_CONST_W (8<<5)
+
+# define R300_TX_FORMAT_YUV_MODE 0x00800000
+
+#define R300_TX_PITCH_0			    0x4500 /* obviously missing in the gap */
+#define R300_TX_OFFSET_0 0x4540
+ /* BEGIN: Guess from R200 */
+# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define R300_TXO_MACRO_TILE (1 << 2)
+# define R300_TXO_MICRO_TILE (1 << 3)
+# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
+# define R300_TXO_OFFSET_MASK 0xffffffe0
+# define R300_TXO_OFFSET_SHIFT 5
+ /* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0 0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0 0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have
+ * at least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0 0x4600
+# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
+# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
+# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
+#define R300_PFS_CNTL_1 0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2 0x4608
+# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
+# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
+# define R300_PFS_CNTL_ALU_END_SHIFT 6
+# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
+# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
+# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
+# define R300_PFS_CNTL_TEX_END_SHIFT 18
+# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
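+
+/* Usage sketch (GUESS, untested): a program with 4 ALU and 2 TEX
+ * instructions, both streams starting at offset 0 (*_END is the index of
+ * the last instruction, relative to the offset):
+ *
+ *   cntl2 = (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT)
+ *         | (3 << R300_PFS_CNTL_ALU_END_SHIFT)
+ *         | (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT)
+ *         | (1 << R300_PFS_CNTL_TEX_END_SHIFT);
+ */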
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_PFS_NODE_0 0x4610
+#define R300_PFS_NODE_1 0x4614
+#define R300_PFS_NODE_2 0x4618
+#define R300_PFS_NODE_3 0x461C
+# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
+# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
+# define R300_PFS_NODE_ALU_END_SHIFT 6
+# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
+# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
+# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
+# define R300_PFS_NODE_TEX_END_SHIFT 17
+# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
+# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
+# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_PFS_TEXI_0 0x4620
+# define R300_FPITX_SRC_SHIFT 0
+# define R300_FPITX_SRC_MASK (31 << 0)
+ /* GUESS */
+# define R300_FPITX_SRC_CONST (1 << 5)
+# define R300_FPITX_DST_SHIFT 6
+# define R300_FPITX_DST_MASK (31 << 6)
+# define R300_FPITX_IMAGE_SHIFT 11
+ /* GUESS based on layout and native limits */
+# define R300_FPITX_IMAGE_MASK (15 << 11)
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+# define R300_FPITX_OPCODE_SHIFT 15
+# define R300_FPITX_OP_TEX 1
+# define R300_FPITX_OP_KIL 2
+# define R300_FPITX_OP_TXP 3
+# define R300_FPITX_OP_TXB 4
+# define R300_FPITX_OPCODE_MASK (7 << 15)
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the
+ * order in which fglrx writes them. I assume there is space for 64
+ * instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ * - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ * - DP4: Use OUTC_DP4, OUTA_DP4
+ * - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ * - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ * - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ * - FLR: use FRC+MAD
+ * - XPD: use MAD+MAD
+ * - SGE, SLT: use MAD+CMP
+ * - RSQ: use ABS modifier for argument
+ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ * (e.g. RCP) into color register
+ * - apparently, there's no quick DST operation
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their sources
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ * - Argument order is the same as in ARB_fragment_program.
+ * - Operation is MAD
+ * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ * - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_PFS_INSTR1_0 0x46C0
+# define R300_FPI1_SRC0C_SHIFT 0
+# define R300_FPI1_SRC0C_MASK (31 << 0)
+# define R300_FPI1_SRC0C_CONST (1 << 5)
+# define R300_FPI1_SRC1C_SHIFT 6
+# define R300_FPI1_SRC1C_MASK (31 << 6)
+# define R300_FPI1_SRC1C_CONST (1 << 11)
+# define R300_FPI1_SRC2C_SHIFT 12
+# define R300_FPI1_SRC2C_MASK (31 << 12)
+# define R300_FPI1_SRC2C_CONST (1 << 17)
+# define R300_FPI1_SRC_MASK 0x0003ffff
+# define R300_FPI1_DSTC_SHIFT 18
+# define R300_FPI1_DSTC_MASK (31 << 18)
+# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
+# define R300_FPI1_DSTC_REG_X (1 << 23)
+# define R300_FPI1_DSTC_REG_Y (1 << 24)
+# define R300_FPI1_DSTC_REG_Z (1 << 25)
+# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
+# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
+# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
+# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
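+
+/* Usage sketch (GUESS, untested): color source setup for
+ * "MAD r0, r0, r1, c0" writing r0.xyz, with the constant in the third
+ * source slot:
+ *
+ *   instr1 = (0 << R300_FPI1_SRC0C_SHIFT)
+ *          | (1 << R300_FPI1_SRC1C_SHIFT)
+ *          | (0 << R300_FPI1_SRC2C_SHIFT) | R300_FPI1_SRC2C_CONST
+ *          | (0 << R300_FPI1_DSTC_SHIFT)
+ *          | R300_FPI1_DSTC_REG_X | R300_FPI1_DSTC_REG_Y
+ *          | R300_FPI1_DSTC_REG_Z;
+ */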
+
+#define R300_PFS_INSTR3_0 0x47C0
+# define R300_FPI3_SRC0A_SHIFT 0
+# define R300_FPI3_SRC0A_MASK (31 << 0)
+# define R300_FPI3_SRC0A_CONST (1 << 5)
+# define R300_FPI3_SRC1A_SHIFT 6
+# define R300_FPI3_SRC1A_MASK (31 << 6)
+# define R300_FPI3_SRC1A_CONST (1 << 11)
+# define R300_FPI3_SRC2A_SHIFT 12
+# define R300_FPI3_SRC2A_MASK (31 << 12)
+# define R300_FPI3_SRC2A_CONST (1 << 17)
+# define R300_FPI3_SRC_MASK 0x0003ffff
+# define R300_FPI3_DSTA_SHIFT 18
+# define R300_FPI3_DSTA_MASK (31 << 18)
+# define R300_FPI3_DSTA_REG (1 << 23)
+# define R300_FPI3_DSTA_OUTPUT (1 << 24)
+# define R300_FPI3_DSTA_DEPTH (1 << 27)
+
+#define R300_PFS_INSTR0_0 0x48C0
+# define R300_FPI0_ARGC_SRC0C_XYZ 0
+# define R300_FPI0_ARGC_SRC0C_XXX 1
+# define R300_FPI0_ARGC_SRC0C_YYY 2
+# define R300_FPI0_ARGC_SRC0C_ZZZ 3
+# define R300_FPI0_ARGC_SRC1C_XYZ 4
+# define R300_FPI0_ARGC_SRC1C_XXX 5
+# define R300_FPI0_ARGC_SRC1C_YYY 6
+# define R300_FPI0_ARGC_SRC1C_ZZZ 7
+# define R300_FPI0_ARGC_SRC2C_XYZ 8
+# define R300_FPI0_ARGC_SRC2C_XXX 9
+# define R300_FPI0_ARGC_SRC2C_YYY 10
+# define R300_FPI0_ARGC_SRC2C_ZZZ 11
+# define R300_FPI0_ARGC_SRC0A 12
+# define R300_FPI0_ARGC_SRC1A 13
+# define R300_FPI0_ARGC_SRC2A 14
+# define R300_FPI0_ARGC_SRC1C_LRP 15
+# define R300_FPI0_ARGC_ZERO 20
+# define R300_FPI0_ARGC_ONE 21
+ /* GUESS */
+# define R300_FPI0_ARGC_HALF 22
+# define R300_FPI0_ARGC_SRC0C_YZX 23
+# define R300_FPI0_ARGC_SRC1C_YZX 24
+# define R300_FPI0_ARGC_SRC2C_YZX 25
+# define R300_FPI0_ARGC_SRC0C_ZXY 26
+# define R300_FPI0_ARGC_SRC1C_ZXY 27
+# define R300_FPI0_ARGC_SRC2C_ZXY 28
+# define R300_FPI0_ARGC_SRC0CA_WZY 29
+# define R300_FPI0_ARGC_SRC1CA_WZY 30
+# define R300_FPI0_ARGC_SRC2CA_WZY 31
+
+# define R300_FPI0_ARG0C_SHIFT 0
+# define R300_FPI0_ARG0C_MASK (31 << 0)
+# define R300_FPI0_ARG0C_NEG (1 << 5)
+# define R300_FPI0_ARG0C_ABS (1 << 6)
+# define R300_FPI0_ARG1C_SHIFT 7
+# define R300_FPI0_ARG1C_MASK (31 << 7)
+# define R300_FPI0_ARG1C_NEG (1 << 12)
+# define R300_FPI0_ARG1C_ABS (1 << 13)
+# define R300_FPI0_ARG2C_SHIFT 14
+# define R300_FPI0_ARG2C_MASK (31 << 14)
+# define R300_FPI0_ARG2C_NEG (1 << 19)
+# define R300_FPI0_ARG2C_ABS (1 << 20)
+# define R300_FPI0_SPECIAL_LRP (1 << 21)
+# define R300_FPI0_OUTC_MAD (0 << 23)
+# define R300_FPI0_OUTC_DP3 (1 << 23)
+# define R300_FPI0_OUTC_DP4 (2 << 23)
+# define R300_FPI0_OUTC_MIN (4 << 23)
+# define R300_FPI0_OUTC_MAX (5 << 23)
+# define R300_FPI0_OUTC_CMPH (7 << 23)
+# define R300_FPI0_OUTC_CMP (8 << 23)
+# define R300_FPI0_OUTC_FRC (9 << 23)
+# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
+# define R300_FPI0_OUTC_SAT (1 << 30)
+# define R300_FPI0_INSERT_NOP (1 << 31)
+
+#define R300_PFS_INSTR2_0 0x49C0
+# define R300_FPI2_ARGA_SRC0C_X 0
+# define R300_FPI2_ARGA_SRC0C_Y 1
+# define R300_FPI2_ARGA_SRC0C_Z 2
+# define R300_FPI2_ARGA_SRC1C_X 3
+# define R300_FPI2_ARGA_SRC1C_Y 4
+# define R300_FPI2_ARGA_SRC1C_Z 5
+# define R300_FPI2_ARGA_SRC2C_X 6
+# define R300_FPI2_ARGA_SRC2C_Y 7
+# define R300_FPI2_ARGA_SRC2C_Z 8
+# define R300_FPI2_ARGA_SRC0A 9
+# define R300_FPI2_ARGA_SRC1A 10
+# define R300_FPI2_ARGA_SRC2A 11
+# define R300_FPI2_ARGA_SRC1A_LRP 15
+# define R300_FPI2_ARGA_ZERO 16
+# define R300_FPI2_ARGA_ONE 17
+ /* GUESS */
+# define R300_FPI2_ARGA_HALF 18
+# define R300_FPI2_ARG0A_SHIFT 0
+# define R300_FPI2_ARG0A_MASK (31 << 0)
+# define R300_FPI2_ARG0A_NEG (1 << 5)
+ /* GUESS */
+# define R300_FPI2_ARG0A_ABS (1 << 6)
+# define R300_FPI2_ARG1A_SHIFT 7
+# define R300_FPI2_ARG1A_MASK (31 << 7)
+# define R300_FPI2_ARG1A_NEG (1 << 12)
+ /* GUESS */
+# define R300_FPI2_ARG1A_ABS (1 << 13)
+# define R300_FPI2_ARG2A_SHIFT 14
+# define R300_FPI2_ARG2A_MASK (31 << 14)
+# define R300_FPI2_ARG2A_NEG (1 << 19)
+ /* GUESS */
+# define R300_FPI2_ARG2A_ABS (1 << 20)
+# define R300_FPI2_SPECIAL_LRP (1 << 21)
+# define R300_FPI2_OUTA_MAD (0 << 23)
+# define R300_FPI2_OUTA_DP4 (1 << 23)
+# define R300_FPI2_OUTA_MIN (2 << 23)
+# define R300_FPI2_OUTA_MAX (3 << 23)
+# define R300_FPI2_OUTA_CMP (6 << 23)
+# define R300_FPI2_OUTA_FRC (7 << 23)
+# define R300_FPI2_OUTA_EX2 (8 << 23)
+# define R300_FPI2_OUTA_LG2 (9 << 23)
+# define R300_FPI2_OUTA_RCP (10 << 23)
+# define R300_FPI2_OUTA_RSQ (11 << 23)
+# define R300_FPI2_OUTA_SAT (1 << 30)
+# define R300_FPI2_UNKNOWN_31 (1 << 31)
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE 0x4BC0
+# define R300_FOG_ENABLE (1 << 0)
+# define R300_FOG_MODE_LINEAR (0 << 1)
+# define R300_FOG_MODE_EXP (1 << 1)
+# define R300_FOG_MODE_EXP2 (2 << 1)
+# define R300_FOG_MODE_MASK (3 << 1)
+#define R300_FOG_COLOR_R 0x4BC8
+#define R300_FOG_COLOR_G 0x4BCC
+#define R300_FOG_COLOR_B 0x4BD0
+
+#define R300_PP_ALPHA_TEST 0x4BD4
+# define R300_REF_ALPHA_MASK 0x000000ff
+# define R300_ALPHA_TEST_FAIL (0 << 8)
+# define R300_ALPHA_TEST_LESS (1 << 8)
+# define R300_ALPHA_TEST_LEQUAL (3 << 8)
+# define R300_ALPHA_TEST_EQUAL (2 << 8)
+# define R300_ALPHA_TEST_GEQUAL (6 << 8)
+# define R300_ALPHA_TEST_GREATER (4 << 8)
+# define R300_ALPHA_TEST_NEQUAL (5 << 8)
+# define R300_ALPHA_TEST_PASS (7 << 8)
+# define R300_ALPHA_TEST_OP_MASK (7 << 8)
+# define R300_ALPHA_TEST_ENABLE (1 << 11)
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X 0x4C00
+#define R300_PFS_PARAM_0_Y 0x4C04
+#define R300_PFS_PARAM_0_Z 0x4C08
+#define R300_PFS_PARAM_0_W 0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X 0x4DF0
+#define R300_PFS_PARAM_31_Y 0x4DF4
+#define R300_PFS_PARAM_31_Z 0x4DF8
+#define R300_PFS_PARAM_31_W 0x4DFC
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ * the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND 0x4E04
+#define R300_RB3D_ABLEND 0x4E08
+/* the following only appear in CBLEND */
+# define R300_BLEND_ENABLE (1 << 0)
+# define R300_BLEND_UNKNOWN (3 << 1)
+# define R300_BLEND_NO_SEPARATE (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+# define R300_FCN_MASK (3 << 12)
+# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
+# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
+# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
+# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
+# define R300_COMB_FCN_MIN (4 << 12)
+# define R300_COMB_FCN_MAX (5 << 12)
+# define R300_COMB_FCN_RSUB_CLAMP (6 << 12)
+# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12)
+# define R300_BLEND_GL_ZERO (32)
+# define R300_BLEND_GL_ONE (33)
+# define R300_BLEND_GL_SRC_COLOR (34)
+# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35)
+# define R300_BLEND_GL_DST_COLOR (36)
+# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37)
+# define R300_BLEND_GL_SRC_ALPHA (38)
+# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39)
+# define R300_BLEND_GL_DST_ALPHA (40)
+# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41)
+# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42)
+# define R300_BLEND_GL_CONST_COLOR (43)
+# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44)
+# define R300_BLEND_GL_CONST_ALPHA (45)
+# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46)
+# define R300_BLEND_MASK (63)
+# define R300_SRC_BLEND_SHIFT (16)
+# define R300_DST_BLEND_SHIFT (24)
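+
+/* Usage sketch (GUESS, untested): classic GL-style alpha blending
+ * (SRC_ALPHA, ONE_MINUS_SRC_ALPHA) in CBLEND:
+ *
+ *   cblend = R300_BLEND_ENABLE | R300_COMB_FCN_ADD_CLAMP
+ *          | (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT)
+ *          | (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+ */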
+#define R300_RB3D_BLEND_COLOR 0x4E10
+#define R300_RB3D_COLORMASK 0x4E0C
+# define R300_COLORMASK0_B (1<<0)
+# define R300_COLORMASK0_G (1<<1)
+# define R300_COLORMASK0_R (1<<2)
+# define R300_COLORMASK0_A (1<<3)
+
+/* gap */
+
+#define R300_RB3D_COLOROFFSET0 0x4E28
+# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
+
+/* gap */
+
+/* Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird, tile-like, but with some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0 0x4E38
+# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
+# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
+# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
+# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
+# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
+# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
+# define R300_COLOR_FORMAT_RGB565 (2 << 22)
+# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
+#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
+
+#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
+#define R300_RB3D_AARESOLVE_PITCH 0x4E84
+#define R300_RB3D_AARESOLVE_CTL 0x4E88
+/* gap */
+
+/* Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+/*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/
+# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002
+# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so a plain write is 6. - vd
+ */
+#define R300_ZB_CNTL 0x4F00
+# define R300_STENCIL_ENABLE (1 << 0)
+# define R300_Z_ENABLE (1 << 1)
+# define R300_Z_WRITE_ENABLE (1 << 2)
+# define R300_Z_SIGNED_COMPARE (1 << 3)
+# define R300_STENCIL_FRONT_BACK (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL 0x4f04
+ /* functions */
+# define R300_ZS_NEVER 0
+# define R300_ZS_LESS 1
+# define R300_ZS_LEQUAL 2
+# define R300_ZS_EQUAL 3
+# define R300_ZS_GEQUAL 4
+# define R300_ZS_GREATER 5
+# define R300_ZS_NOTEQUAL 6
+# define R300_ZS_ALWAYS 7
+# define R300_ZS_MASK 7
+ /* operations */
+# define R300_ZS_KEEP 0
+# define R300_ZS_ZERO 1
+# define R300_ZS_REPLACE 2
+# define R300_ZS_INCR 3
+# define R300_ZS_DECR 4
+# define R300_ZS_INVERT 5
+# define R300_ZS_INCR_WRAP 6
+# define R300_ZS_DECR_WRAP 7
+# define R300_Z_FUNC_SHIFT 0
+ /* front and back refer to operations done for front
+ and back faces, i.e. separate stencil function support */
+# define R300_S_FRONT_FUNC_SHIFT 3
+# define R300_S_FRONT_SFAIL_OP_SHIFT 6
+# define R300_S_FRONT_ZPASS_OP_SHIFT 9
+# define R300_S_FRONT_ZFAIL_OP_SHIFT 12
+# define R300_S_BACK_FUNC_SHIFT 15
+# define R300_S_BACK_SFAIL_OP_SHIFT 18
+# define R300_S_BACK_ZPASS_OP_SHIFT 21
+# define R300_S_BACK_ZFAIL_OP_SHIFT 24
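+
+/* Usage sketch (GUESS, untested): plain LESS depth test with stencil
+ * left as a no-op pass-through:
+ *
+ *   zstencil = (R300_ZS_LESS << R300_Z_FUNC_SHIFT)
+ *            | (R300_ZS_ALWAYS << R300_S_FRONT_FUNC_SHIFT)
+ *            | (R300_ZS_KEEP << R300_S_FRONT_SFAIL_OP_SHIFT)
+ *            | (R300_ZS_KEEP << R300_S_FRONT_ZPASS_OP_SHIFT)
+ *            | (R300_ZS_KEEP << R300_S_FRONT_ZFAIL_OP_SHIFT);
+ */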
+
+#define R300_ZB_STENCILREFMASK 0x4f08
+# define R300_STENCILREF_SHIFT 0
+# define R300_STENCILREF_MASK 0x000000ff
+# define R300_STENCILMASK_SHIFT 8
+# define R300_STENCILMASK_MASK 0x0000ff00
+# define R300_STENCILWRITEMASK_SHIFT 16
+# define R300_STENCILWRITEMASK_MASK 0x00ff0000
+
+/* gap */
+
+#define R300_ZB_FORMAT 0x4f10
+# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
+# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
+# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
+/* reserved up to (15 << 0) */
+# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
+# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP 0x4F14
+# define R300_ZTOP_DISABLE (0 << 0)
+# define R300_ZTOP_ENABLE (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)
+
+#define R300_ZB_BW_CNTL 0x4f1c
+# define R300_HIZ_DISABLE (0 << 0)
+# define R300_HIZ_ENABLE (1 << 0)
+# define R300_HIZ_MIN (0 << 1)
+# define R300_HIZ_MAX (1 << 1)
+# define R300_FAST_FILL_DISABLE (0 << 2)
+# define R300_FAST_FILL_ENABLE (1 << 2)
+# define R300_RD_COMP_DISABLE (0 << 3)
+# define R300_RD_COMP_ENABLE (1 << 3)
+# define R300_WR_COMP_DISABLE (0 << 4)
+# define R300_WR_COMP_ENABLE (1 << 4)
+# define R300_ZB_CB_CLEAR_RMW (0 << 5)
+# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
+# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
+# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)
+
+# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
+# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
+# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
+# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)
+
+# define R500_BMASK_ENABLE (0 << 10)
+# define R500_BMASK_DISABLE (1 << 10)
+# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
+# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
+# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
+# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
+# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
+# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
+# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
+# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
+# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
+# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
+# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
+# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
+# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
+# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
+# define R500_PEQ_PACKING_DISABLE (0 << 18)
+# define R500_PEQ_PACKING_ENABLE (1 << 18)
+# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
+# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET 0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH 0x4f24
+# define R300_DEPTHPITCH_MASK 0x00003FFC
+# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
+# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
+# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
+# define R300_DEPTHMICROTILE_TILED (1 << 17)
+# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
+# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
+# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
+# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE 0x4f28
+
+#define R300_ZB_ZMASK_OFFSET 0x4f30
+#define R300_ZB_ZMASK_PITCH 0x4f34
+#define R300_ZB_ZMASK_WRINDEX 0x4f38
+#define R300_ZB_ZMASK_DWORD 0x4f3c
+#define R300_ZB_ZMASK_RDINDEX 0x4f40
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET 0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX 0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD 0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX 0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH 0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA 0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR 0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET 0x4f60
+# define R300_DEPTHX_OFFSET_SHIFT 1
+# define R300_DEPTHX_OFFSET_MASK 0x000007FE
+# define R300_DEPTHY_OFFSET_SHIFT 17
+# define R300_DEPTHY_OFFSET_MASK 0x07FE0000
+
+/* Sets the FIFO sizes */
+#define R500_ZB_FIFO_SIZE 0x4fd0
+# define R500_OP_FIFO_SIZE_FULL (0 << 0)
+# define R500_OP_FIFO_SIZE_HALF (1 << 0)
+# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF 0x4fd4
+# define R500_STENCILREF_SHIFT 0
+# define R500_STENCILREF_MASK 0x000000ff
+# define R500_STENCILMASK_SHIFT 8
+# define R500_STENCILMASK_MASK 0x0000ff00
+# define R500_STENCILWRITEMASK_SHIFT 16
+# define R500_STENCILWRITEMASK_MASK 0x00ff0000
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ * DWORD 0: output and opcode
+ * DWORD 1: first argument
+ * DWORD 2: second argument
+ * DWORD 3: third argument
+ *
+ * Notes:
+ * - ABS r, a is implemented as MAX r, a, -a
+ * - MOV is implemented as ADD to zero
+ * - XPD is implemented as MUL + MAD
+ * - FLR is implemented as FRC + ADD
+ * - apparently, fglrx tries to schedule instructions so that there is at
+ * least one instruction between the write to a temporary and the first
+ * read from said temporary; however, violations of this scheduling are
+ * allowed
+ * - register indices seem to be unrelated to OpenGL aliasing to
+ *   conventional state
+ * - only one attribute and one parameter can be loaded at a time; however,
+ * the same attribute/parameter can be used for more than one argument
+ * - the second software argument for POW is the third hardware argument
+ * (no idea why)
+ * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ * The single argument is replicated across all three inputs, but swizzled:
+ * First argument: xyzy
+ * Second argument: xyzx
+ * Third argument: xyzw
+ * Whenever the result is used later in the fragment program, fglrx forces
+ * x and w to be 1.0 in the input selection; I don't know whether this is
+ * strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT (1 << 0)
+#define R300_VPI_OUT_OP_MUL (2 << 0)
+#define R300_VPI_OUT_OP_ADD (3 << 0)
+#define R300_VPI_OUT_OP_MAD (4 << 0)
+#define R300_VPI_OUT_OP_DST (5 << 0)
+#define R300_VPI_OUT_OP_FRC (6 << 0)
+#define R300_VPI_OUT_OP_MAX (7 << 0)
+#define R300_VPI_OUT_OP_MIN (8 << 0)
+#define R300_VPI_OUT_OP_SGE (9 << 0)
+#define R300_VPI_OUT_OP_SLT (10 << 0)
+ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12 (12 << 0)
+#define R300_VPI_OUT_OP_ARL (13 << 0)
+#define R300_VPI_OUT_OP_EXP (65 << 0)
+#define R300_VPI_OUT_OP_LOG (66 << 0)
+ /* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67 (67 << 0)
+#define R300_VPI_OUT_OP_LIT (68 << 0)
+#define R300_VPI_OUT_OP_POW (69 << 0)
+#define R300_VPI_OUT_OP_RCP (70 << 0)
+#define R300_VPI_OUT_OP_RSQ (72 << 0)
+ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73 (73 << 0)
+#define R300_VPI_OUT_OP_EX2 (75 << 0)
+#define R300_VPI_OUT_OP_LG2 (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
+ /* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129 (129 << 0)
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT 13
+ /* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13)
+
+#define R300_VPI_OUT_WRITE_X (1 << 20)
+#define R300_VPI_OUT_WRITE_Y (1 << 21)
+#define R300_VPI_OUT_WRITE_Z (1 << 22)
+#define R300_VPI_OUT_WRITE_W (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK (31 << 0)
+
+#define R300_VPI_IN_REG_INDEX_SHIFT 5
+ /* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK (255 << 5)
+
+/* The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
+#define R300_VPI_IN_SELECT_X 0
+#define R300_VPI_IN_SELECT_Y 1
+#define R300_VPI_IN_SELECT_Z 2
+#define R300_VPI_IN_SELECT_W 3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE 5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT 13
+#define R300_VPI_IN_Y_SHIFT 16
+#define R300_VPI_IN_Z_SHIFT 19
+#define R300_VPI_IN_W_SHIFT 22
+
+#define R300_VPI_IN_NEG_X (1 << 25)
+#define R300_VPI_IN_NEG_Y (1 << 26)
+#define R300_VPI_IN_NEG_Z (1 << 27)
+#define R300_VPI_IN_NEG_W (1 << 28)
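+
+/* Usage sketch (GUESS, untested): a .yzxw swizzle with W negated on one
+ * argument dword:
+ *
+ *   arg = (R300_VPI_IN_SELECT_Y << R300_VPI_IN_X_SHIFT)
+ *       | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Y_SHIFT)
+ *       | (R300_VPI_IN_SELECT_X << R300_VPI_IN_Z_SHIFT)
+ *       | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT)
+ *       | R300_VPI_IN_NEG_W;
+ */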
+/* END: Vertex program instruction set */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE (0 << 0)
+#define R300_PRIM_TYPE_POINT (1 << 0)
+#define R300_PRIM_TYPE_LINE (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+ /* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
+#define R300_PRIM_TYPE_QUADS (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
+#define R300_PRIM_TYPE_POLYGON (15 << 0)
+#define R300_PRIM_TYPE_MASK 0xF
+#define R300_PRIM_WALK_IND (1 << 4)
+#define R300_PRIM_WALK_LIST (2 << 4)
+#define R300_PRIM_WALK_RING (3 << 4)
+#define R300_PRIM_WALK_MASK (3 << 4)
+ /* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT 16
+#define R300_PRIM_NUM_VERTICES_MASK 0xffff
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to always be 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
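+
+/* Packing sketch (GUESS, untested): for two arrays with entry sizes
+ * size0/size1 and strides stride0/stride1 (all in dwords), the first
+ * dword of a block would be
+ *
+ *   dw0 = (size0 | (stride0 << 8)) | ((size1 | (stride1 << 8)) << 16);
+ */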
+
+#define R300_PACKET3_INDX_BUFFER 0x00003300
+# define R300_EB_UNK1_SHIFT 24
+# define R300_EB_UNK1 (0x80<<24)
+# define R300_EB_UNK2 0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400
+#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8 2
+#define R300_CP_COLOR_FORMAT_ARGB1555 3
+#define R300_CP_COLOR_FORMAT_RGB565 4
+#define R300_CP_COLOR_FORMAT_ARGB8888 6
+#define R300_CP_COLOR_FORMAT_RGB332 7
+#define R300_CP_COLOR_FORMAT_RGB8 9
+#define R300_CP_COLOR_FORMAT_ARGB4444 15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
+
+#define R500_VAP_INDEX_OFFSET 0x208c
+
+#define R500_GA_US_VECTOR_INDEX 0x4250
+#define R500_GA_US_VECTOR_DATA 0x4254
+
+#define R500_RS_IP_0 0x4074
+#define R500_RS_INST_0 0x4320
+
+#define R500_US_CONFIG 0x4600
+
+#define R500_US_FC_CTRL 0x4624
+#define R500_US_CODE_ADDR 0x4630
+
+#define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0
+#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8
+
+#define R300_SU_REG_DEST 0x42c8
+#define RV530_FG_ZBREG_DEST 0x4be8
+#define R300_ZB_ZPASS_DATA 0x4f58
+#define R300_ZB_ZPASS_ADDR 0x4f5c
+
+#endif /* _R300_REG_H */
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
new file mode 100644
index 0000000..ff229a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R300D_H__
+#define __R300D_H__
+
+#define CP_PACKET0 0x00000000
+#define PACKET0_BASE_INDEX_SHIFT 0
+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
+#define PACKET0_COUNT_SHIFT 16
+#define PACKET0_COUNT_MASK (0x3fff << 16)
+#define CP_PACKET1 0x40000000
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+#define CP_PACKET3 0xC0000000
+#define PACKET3_IT_OPCODE_SHIFT 8
+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
+#define PACKET3_COUNT_SHIFT 16
+#define PACKET3_COUNT_MASK (0x3fff << 16)
+/* PACKET3 op code */
+#define PACKET3_NOP 0x10
+#define PACKET3_3D_DRAW_VBUF 0x28
+#define PACKET3_3D_DRAW_IMMD 0x29
+#define PACKET3_3D_DRAW_INDX 0x2A
+#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
+#define PACKET3_INDX_BUFFER 0x33
+#define PACKET3_3D_DRAW_VBUF_2 0x34
+#define PACKET3_3D_DRAW_IMMD_2 0x35
+#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
+#define PACKET3_3D_CLEAR_CMASK 0x38
+#define PACKET3_BITBLT_MULTI 0x9B
+
+#define PACKET0(reg, n) (CP_PACKET0 | \
+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
+ REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n) (CP_PACKET3 | \
+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
+ REG_SET(PACKET3_COUNT, (n)))
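+
+/* Usage sketch: REG_SET() is assumed to come from radeon.h. A type-0
+ * packet header writing one dword to register 0x4f18 and a type-3 NOP
+ * header would be built as
+ *
+ *   header0 = PACKET0(0x4f18, 0);
+ *   header3 = PACKET3(PACKET3_NOP, 0);
+ */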
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION 0x000148
+#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000148_MC_FB_START 0xFFFF0000
+#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000148_MC_FB_TOP 0x0000FFFF
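+/* Usage sketch: the S_ (set), G_ (get) and C_ (clear mask) macros are
+ * meant for read-modify-write sequences, e.g. to replace the MC_FB_TOP
+ * field while keeping MC_FB_START:
+ *
+ *   tmp = RREG32(R_000148_MC_FB_LOCATION);
+ *   tmp &= C_000148_MC_FB_TOP;
+ *   tmp |= S_000148_MC_FB_TOP(new_top);
+ *   WREG32(R_000148_MC_FB_LOCATION, tmp);
+ */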
+#define R_00014C_MC_AGP_LOCATION 0x00014C
+#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00014C_MC_AGP_START 0xFFFF0000
+#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00014C_MC_AGP_TOP 0x0000FFFF
+#define R_00015C_AGP_BASE_2 0x00015C
+#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
+#define R_000170_AGP_BASE 0x000170
+#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000170_AGP_BASE_ADDR 0x00000000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
+
+#define R_00000D_SCLK_CNTL 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
+#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
+#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
+#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
+#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
+#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
+#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
+#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
+#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
+#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
+#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
+#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
+#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
+#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
+#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
+#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
+#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
+#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
+#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
+#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
+#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
+#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
+#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
+#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
+#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
+#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
+#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
+#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
+#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
+#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
+#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
+#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
+#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
+#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
+#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
+#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
+#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
+#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
+#define C_00000D_FORCE_DISP2 0xFFFF7FFF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP1 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
+#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
+#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
+#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
+#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
+#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
+#define C_00000D_FORCE_OV0 0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 0000000..4e796ec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r100d.h"
+#include "r420d.h"
+#include "r420_reg_safe.h"
+
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+ unsigned tmp;
+ unsigned gb_pipe_select;
+ unsigned num_pipes;
+
+ /* GA_ENHANCE workaround for the TCL deadlock issue */
+ WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+ (1 << 2) | (1 << 3));
+ /* add idle wait as per freedesktop.org bug 24041 */
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+ /* get max number of pipes */
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+ num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+ /* SE chips have 1 pipe */
+ if ((rdev->pdev->device == 0x5e4c) ||
+ (rdev->pdev->device == 0x5e4f))
+ num_pipes = 1;
+
+ rdev->num_gb_pipes = num_pipes;
+ tmp = 0;
+ switch (num_pipes) {
+ default:
+ /* force to 1 pipe */
+ num_pipes = 1;
+ /* fall through */
+ case 1:
+ tmp = (0 << 1); /* R300_PIPE_COUNT_RV350 */
+ break;
+ case 2:
+ tmp = (3 << 1); /* R300_PIPE_COUNT_R300 */
+ break;
+ case 3:
+ tmp = (6 << 1); /* R300_PIPE_COUNT_R420_3P */
+ break;
+ case 4:
+ tmp = (7 << 1); /* R300_PIPE_COUNT_R420 */
+ break;
+ }
+ WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
+ /* Subpixel precision 1/12 so we can have 4K rendering, per the docs */
+ tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+ WREG32(R300_GB_TILE_CONFIG, tmp);
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+ WREG32(R300_RB2D_DSTCACHE_MODE,
+ RREG32(R300_RB2D_DSTCACHE_MODE) |
+ R300_DC_AUTOFLUSH_ENABLE |
+ R300_DC_DC_DISABLE_IGNORE_PE);
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+
+ if (rdev->family == CHIP_RV530) {
+ tmp = RREG32(RV530_GB_PIPE_SELECT2);
+ if ((tmp & 3) == 3)
+ rdev->num_z_pipes = 2;
+ else
+ rdev->num_z_pipes = 1;
+ } else {
+ rdev->num_z_pipes = 1;
+ }
+
+ DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
+ rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
+{
+ u32 r;
+
+ WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0001FC_MC_IND_DATA);
+ return r;
+}
+
+void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
+ S_0001F8_MC_IND_WR_EN(1));
+ WREG32(R_0001FC_MC_IND_DATA, v);
+}
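+
+/*
+ * Illustrative sketch only, not used by the driver: a read-modify-write
+ * of an indirect MC register composes the two accessors above.
+ */
+static inline void r420_mc_rmw(struct radeon_device *rdev, u32 reg,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = r420_mc_rreg(rdev, reg);
+ tmp = (tmp & ~clr) | set;
+ r420_mc_wreg(rdev, reg, tmp);
+}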
+
+static void r420_debugfs(struct radeon_device *rdev)
+{
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+ if (r420_debugfs_pipes_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+}
+
+static void r420_clock_resume(struct radeon_device *rdev)
+{
+ u32 sclk_cntl;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_atom_set_clock_gating(rdev, 1);
+ sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
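+ /* The FORCE_* bits override dynamic gating and keep the CP and VIP
+ * clock domains (plus the PX/TX pixel and texture domains on R420)
+ * running; presumably dynamically gating these blocks is unsafe here.
+ */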
+ sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ if (rdev->family == CHIP_R420)
+ sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
+ WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+}
+
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ /* RV410 and R420 can lock up if CP DMA to host memory happens
+ * while the 2D engine is busy.
+ *
+ * The proper workaround is to queue a RESYNC at the beginning
+ * of the CP init, apparently.
+ */
+ radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ /* Catch the RESYNC we dispatched all the way back,
+ * at the very beginning of the CP init.
+ */
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
+static int r420_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
+ r300_mc_program(rdev);
+ /* Resume clock */
+ r420_clock_resume(rdev);
+ /* Initialize GART (after TTM init, so GART memory can be
+ * allocated through TTM) */
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ r420_pipes_init(rdev);
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+ r420_cp_errata_init(rdev);
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int r420_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Make sure GART is disabled */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r420_clock_resume(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post the card: re-run the ASIC init tables */
+ if (rdev->is_atom_bios) {
+ atom_asic_init(rdev->mode_info.atom_context);
+ } else {
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ /* Resume clock after posting */
+ r420_clock_resume(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = r420_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int r420_suspend(struct radeon_device *rdev)
+{
+ r420_cp_errata_fini(rdev);
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ return 0;
+}
+
+void r420_fini(struct radeon_device *rdev)
+{
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ if (rdev->is_atom_bios) {
+ radeon_atombios_fini(rdev);
+ } else {
+ radeon_combios_fini(rdev);
+ }
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int r420_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disabling VGA needs to use the VGA request interface */
+ /* restore some register to sane defaults */
+ r100_restore_sanity(rdev);
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r) {
+ return r;
+ }
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r) {
+ return r;
+ }
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if the card is posted, and post it if needed */
+ if (!radeon_boot_test_post_card(rdev))
+ return -EINVAL;
+
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r300_mc_init(rdev);
+ r420_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r) {
+ return r;
+ }
+ if (rdev->family == CHIP_R420)
+ r100_enable_bm(rdev);
+
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ r420_set_reg_safe(rdev);
+
+ rdev->accel_working = true;
+ r = r420_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init, so stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(R400_GB_PIPE_SELECT);
+ seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+ tmp = RREG32(R300_GB_TILE_CONFIG);
+ seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+ {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r420_pipes_info_list,
+ ARRAY_SIZE(r420_pipes_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h
new file mode 100644
index 0000000..fc78d31
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420d.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef R420D_H
+#define R420D_H
+
+#define R_0001F8_MC_IND_INDEX 0x0001F8
+#define S_0001F8_MC_IND_ADDR(x) (((x) & 0x7F) << 0)
+#define G_0001F8_MC_IND_ADDR(x) (((x) >> 0) & 0x7F)
+#define C_0001F8_MC_IND_ADDR 0xFFFFFF80
+#define S_0001F8_MC_IND_WR_EN(x) (((x) & 0x1) << 8)
+#define G_0001F8_MC_IND_WR_EN(x) (((x) >> 8) & 0x1)
+#define C_0001F8_MC_IND_WR_EN 0xFFFFFEFF
+#define R_0001FC_MC_IND_DATA 0x0001FC
+#define S_0001FC_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0001FC_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0001FC_MC_IND_DATA 0x00000000
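+
+/*
+ * Naming convention used throughout this header: S_<reg>_<field>(x)
+ * shifts a value into the field, G_<reg>_<field>(x) extracts it, and
+ * C_<reg>_<field> is the mask that clears it. A typical field update
+ * (illustrative only) looks like:
+ *
+ * v = (v & C_0001F8_MC_IND_ADDR) | S_0001F8_MC_IND_ADDR(addr);
+ */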
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
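+
+/*
+ * Example field use (illustrative): an engine-idle poll tests the
+ * GUI_ACTIVE bit of RBBM_STATUS, roughly
+ *
+ * if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
+ * return 0; (engine is idle)
+ */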
+
+/* CLK registers */
+#define R_00000D_SCLK_CNTL 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
+#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
+#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
+#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
+#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
+#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
+#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
+#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
+#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
+#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
+#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
+#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
+#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
+#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
+#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
+#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
+#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
+#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
+#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
+#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
+#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
+#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
+#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
+#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
+#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
+#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
+#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
+#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
+#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
+#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
+#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
+#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
+#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
+#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
+#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
+#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
+#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
+#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
+#define C_00000D_FORCE_DISP2 0xFFFF7FFF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP1 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_VAP 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_SR 0xFDFFFFFF
+#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_PX 0xFBFFFFFF
+#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TX 0xF7FFFFFF
+#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_US 0xEFFFFFFF
+#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
+#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
+#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
+#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SU 0xBFFFFFFF
+#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
+#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
+#define C_00000D_FORCE_OV0 0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 0000000..1dd0d32
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE 0x4288
+# define R300_FRONT_PTYPE_POINT (0 << 4)
+# define R300_FRONT_PTYPE_LINE (1 << 4)
+# define R300_FRONT_PTYPE_TRIANGE (2 << 4)
+# define R300_BACK_PTYPE_POINT (0 << 7)
+# define R300_BACK_PTYPE_LINE (1 << 7)
+# define R300_BACK_PTYPE_TRIANGE (2 << 7)
+#define R300_GA_ROUND_MODE 0x428c
+# define R300_GEOMETRY_ROUND_TRUNC (0 << 0)
+# define R300_GEOMETRY_ROUND_NEAREST (1 << 0)
+# define R300_COLOR_ROUND_TRUNC (0 << 2)
+# define R300_COLOR_ROUND_NEAREST (1 << 2)
+#define R300_GB_MSPOS0 0x4010
+# define R300_MS_X0_SHIFT 0
+# define R300_MS_Y0_SHIFT 4
+# define R300_MS_X1_SHIFT 8
+# define R300_MS_Y1_SHIFT 12
+# define R300_MS_X2_SHIFT 16
+# define R300_MS_Y2_SHIFT 20
+# define R300_MSBD0_Y_SHIFT 24
+# define R300_MSBD0_X_SHIFT 28
+#define R300_GB_MSPOS1 0x4014
+# define R300_MS_X3_SHIFT 0
+# define R300_MS_Y3_SHIFT 4
+# define R300_MS_X4_SHIFT 8
+# define R300_MS_Y4_SHIFT 12
+# define R300_MS_X5_SHIFT 16
+# define R300_MS_Y5_SHIFT 20
+# define R300_MSBD1_SHIFT 24
+
+#define R300_GA_ENHANCE 0x4274
+# define R300_GA_DEADLOCK_CNTL (1 << 0)
+# define R300_GA_FASTSYNC_CNTL (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
+# define R300_RB3D_DC_FLUSH (2 << 0)
+# define R300_RB3D_DC_FREE (2 << 2)
+# define R300_RB3D_DC_FINISH (1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT 0x4f18
+# define R300_ZC_FLUSH (1 << 0)
+# define R300_ZC_FREE (1 << 1)
+# define R300_ZC_FLUSH_ALL 0x3
+#define R400_GB_PIPE_SELECT 0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
+#define R500_SU_REG_DEST 0x42c8
+#define R300_GB_TILE_CONFIG 0x4018
+# define R300_ENABLE_TILING (1 << 0)
+# define R300_PIPE_COUNT_RV350 (0 << 1)
+# define R300_PIPE_COUNT_R300 (3 << 1)
+# define R300_PIPE_COUNT_R420_3P (6 << 1)
+# define R300_PIPE_COUNT_R420 (7 << 1)
+# define R300_TILE_SIZE_8 (0 << 4)
+# define R300_TILE_SIZE_16 (1 << 4)
+# define R300_TILE_SIZE_32 (2 << 4)
+# define R300_SUBPIXEL_1_12 (0 << 16)
+# define R300_SUBPIXEL_1_16 (1 << 16)
+#define R300_DST_PIPE_CONFIG 0x170c
+# define R300_PIPE_AUTO_CONFIG (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE 0x3428
+# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT 0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR 0xE70
+#define RADEON_RBBM_CMDFIFO_DATA 0xE74
+#define RADEON_ISYNC_CNTL 0x1724
+# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
+# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
+# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
+# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
+# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
+# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+
+#define RS480_NB_MC_INDEX 0x168
+# define RS480_NB_MC_IND_WR_EN (1 << 8)
+#define RS480_NB_MC_DATA 0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION 0x100
+#define RS690_MC_FB_START_MASK 0x0000FFFF
+#define RS690_MC_FB_START_SHIFT 0
+#define RS690_MC_FB_TOP_MASK 0xFFFF0000
+#define RS690_MC_FB_TOP_SHIFT 16
+#define RS690_MCCFG_AGP_LOCATION 0x101
+#define RS690_MC_AGP_START_MASK 0x0000FFFF
+#define RS690_MC_AGP_START_SHIFT 0
+#define RS690_MC_AGP_TOP_MASK 0xFFFF0000
+#define RS690_MC_AGP_TOP_SHIFT 16
+#define RS690_MCCFG_AGP_BASE 0x102
+#define RS690_MCCFG_AGP_BASE_2 0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER 0x104
+#define RS690_HDP_FB_LOCATION 0x0134
+#define RS690_MC_INDEX 0x78
+# define RS690_MC_INDEX_MASK 0x1ff
+# define RS690_MC_INDEX_WR_EN (1 << 9)
+# define RS690_MC_INDEX_WR_ACK 0x7f
+#define RS690_MC_DATA 0x7c
+#define RS690_MC_STATUS 0x90
+#define RS690_MC_STATUS_IDLE (1 << 0)
+#define RS480_AGP_BASE_2 0x0164
+#define RS480_MC_MISC_CNTL 0x18
+# define RS480_DISABLE_GTW (1 << 1)
+# define RS480_GART_INDEX_REG_EN (1 << 12)
+# define RS690_BLOCK_GFX_D3_EN (1 << 14)
+#define RS480_GART_FEATURE_ID 0x2b
+# define RS480_HANG_EN (1 << 11)
+# define RS480_TLB_ENABLE (1 << 18)
+# define RS480_P2P_ENABLE (1 << 19)
+# define RS480_GTW_LAC_EN (1 << 25)
+# define RS480_2LEVEL_GART (0 << 30)
+# define RS480_1LEVEL_GART (1 << 30)
+# define RS480_PDC_EN (1 << 31)
+#define RS480_GART_BASE 0x2c
+#define RS480_GART_CACHE_CNTRL 0x2e
+# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
+# define RS480_GART_EN (1 << 0)
+# define RS480_VA_SIZE_32MB (0 << 1)
+# define RS480_VA_SIZE_64MB (1 << 1)
+# define RS480_VA_SIZE_128MB (2 << 1)
+# define RS480_VA_SIZE_256MB (3 << 1)
+# define RS480_VA_SIZE_512MB (4 << 1)
+# define RS480_VA_SIZE_1GB (5 << 1)
+# define RS480_VA_SIZE_2GB (6 << 1)
+#define RS480_AGP_MODE_CNTL 0x39
+# define RS480_POST_GART_Q_SIZE (1 << 18)
+# define RS480_NONGART_SNOOP (1 << 19)
+# define RS480_AGP_RD_BUF_SIZE (1 << 20)
+# define RS480_REQ_TYPE_SNOOP_SHIFT 22
+# define RS480_REQ_TYPE_SNOOP_MASK 0x3
+# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH 0x3A
+# define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS 0x0
+#define RS600_MC_STATUS_IDLE (1 << 0)
+#define RS600_MC_INDEX 0x70
+# define RS600_MC_ADDR_MASK 0xffff
+# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
+# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
+# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
+# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
+# define RS600_MC_IND_AIC_RBS (1 << 20)
+# define RS600_MC_IND_CITF_ARB0 (1 << 21)
+# define RS600_MC_IND_CITF_ARB1 (1 << 22)
+# define RS600_MC_IND_WR_EN (1 << 23)
+#define RS600_MC_DATA 0x74
+#define RS600_MC_STATUS 0x0
+# define RS600_MC_IDLE (1 << 1)
+#define RS600_MC_FB_LOCATION 0x4
+#define RS600_MC_FB_START_MASK 0x0000FFFF
+#define RS600_MC_FB_START_SHIFT 0
+#define RS600_MC_FB_TOP_MASK 0xFFFF0000
+#define RS600_MC_FB_TOP_SHIFT 16
+#define RS600_MC_AGP_LOCATION 0x5
+#define RS600_MC_AGP_START_MASK 0x0000FFFF
+#define RS600_MC_AGP_START_SHIFT 0
+#define RS600_MC_AGP_TOP_MASK 0xFFFF0000
+#define RS600_MC_AGP_TOP_SHIFT 16
+#define RS600_MC_AGP_BASE 0x6
+#define RS600_MC_AGP_BASE_2 0x7
+#define RS600_MC_CNTL1 0x9
+# define RS600_ENABLE_PAGE_TABLES (1 << 26)
+#define RS600_MC_PT0_CNTL 0x100
+# define RS600_ENABLE_PT (1 << 0)
+# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
+# define RS600_INVALIDATE_L2_CACHE (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
+# define RS600_ENABLE_PAGE_TABLE (1 << 0)
+# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
+# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
+# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
+# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
+# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+# define RS600_INVALIDATE_L1_TLB (1 << 20)
+/* rs600/rs690/rs740 */
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_FB_START_MASK 0x0000FFFF
+#define RV515_MC_FB_START_SHIFT 0
+#define RV515_MC_FB_TOP_MASK 0xFFFF0000
+#define RV515_MC_FB_TOP_SHIFT 16
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_START_MASK 0x0000FFFF
+#define RV515_MC_AGP_START_SHIFT 0
+#define RV515_MC_AGP_TOP_MASK 0xFFFF0000
+#define RV515_MC_AGP_TOP_SHIFT 16
+#define RV515_MC_AGP_BASE 0x03
+#define RV515_MC_AGP_BASE_2 0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_FB_START_MASK 0x0000FFFF
+#define R520_MC_FB_START_SHIFT 0
+#define R520_MC_FB_TOP_MASK 0xFFFF0000
+#define R520_MC_FB_TOP_SHIFT 16
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_START_MASK 0x0000FFFF
+#define R520_MC_AGP_START_SHIFT 0
+#define R520_MC_AGP_TOP_MASK 0xFFFF0000
+#define R520_MC_AGP_TOP_SHIFT 16
+#define R520_MC_AGP_BASE 0x06
+#define R520_MC_AGP_BASE_2 0x07
+
+#define AVIVO_MC_INDEX 0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER 0x09
+#define AVIVO_MC_DATA 0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA 0x74
+
+#define RV515_MC_CNTL 0x5
+# define RV515_MEM_NUM_CHANNELS_MASK 0x3
+#define R520_MC_CNTL0 0x8
+# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24)
+# define R520_MEM_NUM_CHANNELS_SHIFT 24
+# define R520_MC_CHANNEL_SIZE (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL 0x000f /* PLL */
+# define AVIVO_CP_FORCEON (1 << 0)
+#define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */
+# define AVIVO_E2_FORCEON (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */
+# define AVIVO_IDCT_FORCEON (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL 0x0300
+# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16)
+#define AVIVO_D1VGA_CONTROL 0x0330
+# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+# define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL 0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400
+#define AVIVO_EXT1_PPLL_REF_DIV 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410
+#define AVIVO_EXT2_PPLL_REF_DIV 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV 0x430
+#define AVIVO_EXT2_PPLL_FB_DIV 0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV 0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV 0x444
+
+#define AVIVO_EXT1_PPLL_CNTL 0x448
+#define AVIVO_EXT2_PPLL_CNTL 0x44c
+
+#define AVIVO_P1PLL_CNTL 0x450
+#define AVIVO_P2PLL_CNTL 0x454
+#define AVIVO_P1PLL_INT_SS_CNTL 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL 0x460
+#define AVIVO_P2PLL_LVTMA_CNTL 0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL 0x480
+#define AVIVO_PCLK_CRTC2_CNTL 0x484
+
+#define AVIVO_D1CRTC_H_TOTAL 0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004
+#define AVIVO_D1CRTC_H_SYNC_A 0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c
+#define AVIVO_D1CRTC_H_SYNC_B 0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL 0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024
+#define AVIVO_D1CRTC_V_SYNC_A 0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c
+#define AVIVO_D1CRTC_V_SYNC_B 0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034
+
+#define AVIVO_D1CRTC_CONTROL 0x6080
+# define AVIVO_CRTC_EN (1 << 0)
+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS 0x609c
+# define AVIVO_D1CRTC_V_BLANK (1 << 0)
+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
+#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
+#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
+
+#define AVIVO_D1GRPH_ENABLE 0x6100
+#define AVIVO_D1GRPH_CONTROL 0x6104
+# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0)
+# define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0)
+
+# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8)
+
+# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8)
+# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8)
+
+# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8)
+# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8)
+
+# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8)
+
+# define AVIVO_D1GRPH_SWAP_RB (1 << 16)
+# define AVIVO_D1GRPH_TILED (1 << 20)
+# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
+# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
+# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
+
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa. This applies to GRPH, CUR, etc.
+ */
+#define AVIVO_D1GRPH_LUT_SEL 0x6108
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+#define AVIVO_D1GRPH_PITCH 0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
+#define AVIVO_D1GRPH_X_START 0x612c
+#define AVIVO_D1GRPH_Y_START 0x6130
+#define AVIVO_D1GRPH_X_END 0x6134
+#define AVIVO_D1GRPH_Y_END 0x6138
+#define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define AVIVO_D1CUR_CONTROL 0x6400
+# define AVIVO_D1CURSOR_EN (1 << 0)
+# define AVIVO_D1CURSOR_MODE_SHIFT 8
+# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
+# define AVIVO_D1CURSOR_MODE_24BPP 2
+#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c
+#define AVIVO_D1CUR_SIZE 0x6410
+#define AVIVO_D1CUR_POSITION 0x6414
+#define AVIVO_D1CUR_HOT_SPOT 0x6418
+#define AVIVO_D1CUR_UPDATE 0x6424
+# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16)
+
+#define AVIVO_DC_LUT_RW_SELECT 0x6480
+#define AVIVO_DC_LUT_RW_MODE 0x6484
+#define AVIVO_DC_LUT_RW_INDEX 0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
+#define AVIVO_DC_LUT_PWL_DATA 0x6490
+#define AVIVO_DC_LUT_30_COLOR 0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
+#define AVIVO_DC_LUT_AUTOFILL 0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL 0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT 0x6520
+# define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3
+# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2
+# define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
+# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
+# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
+
+#define AVIVO_D1MODE_DATA_FORMAT 0x6528
+# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
+#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
+# define AVIVO_VBLANK_ACK (1 << 4)
+#define AVIVO_D1MODE_VLINE_START_END 0x6538
+#define AVIVO_D1MODE_VLINE_STATUS 0x653c
+# define AVIVO_D1MODE_VLINE_STAT (1 << 12)
+#define AVIVO_DxMODE_INT_MASK 0x6540
+# define AVIVO_D1MODE_INT_MASK (1 << 0)
+# define AVIVO_D2MODE_INT_MASK (1 << 8)
+#define AVIVO_D1MODE_VIEWPORT_START 0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE 0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594
+#define AVIVO_D1SCL_UPDATE 0x65cc
+# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL 0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804
+#define AVIVO_D2CRTC_H_SYNC_A 0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c
+#define AVIVO_D2CRTC_H_SYNC_B 0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL 0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824
+#define AVIVO_D2CRTC_V_SYNC_A 0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c
+#define AVIVO_D2CRTC_V_SYNC_B 0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834
+
+#define AVIVO_D2CRTC_CONTROL 0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
+#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
+#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
+
+#define AVIVO_D2GRPH_ENABLE 0x6900
+#define AVIVO_D2GRPH_CONTROL 0x6904
+#define AVIVO_D2GRPH_LUT_SEL 0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
+#define AVIVO_D2GRPH_PITCH 0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928
+#define AVIVO_D2GRPH_X_START 0x692c
+#define AVIVO_D2GRPH_Y_START 0x6930
+#define AVIVO_D2GRPH_X_END 0x6934
+#define AVIVO_D2GRPH_Y_END 0x6938
+#define AVIVO_D2GRPH_UPDATE 0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948
+
+#define AVIVO_D2CUR_CONTROL 0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08
+#define AVIVO_D2CUR_SIZE 0x6c10
+#define AVIVO_D2CUR_POSITION 0x6c14
+
+#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
+#define AVIVO_D2MODE_VLINE_START_END 0x6d38
+#define AVIVO_D2MODE_VLINE_STATUS 0x6d3c
+#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214
+
+#define AVIVO_DACA_ENABLE 0x7800
+# define AVIVO_DAC_ENABLE (1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT 0x7804
+# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0)
+# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0)
+# define AVIVO_DAC_SOURCE_TV (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
+#define AVIVO_DACA_POWERDOWN 0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED (1 << 24)
+
+#define AVIVO_DACB_ENABLE 0x7a00
+#define AVIVO_DACB_SOURCE_SELECT 0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
+#define AVIVO_DACB_POWERDOWN 0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED (1 << 24) /* assumed, by symmetry with DACA */
+
+#define AVIVO_TMDSA_CNTL 0x7880
+# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
+# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
+# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
+# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
+# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16)
+# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24)
+# define AVIVO_TMDSA_CNTL_SWAP (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT 0x7884
+/* 78a8 appears to be some kind of (reasonably tolerant) clock?
+ * 78d0 definitely hits the transmitter, definitely clock. */
+/* MYSTERY1 This appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
+# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
+# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8
+# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
+# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
+# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
+# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define AVIVO_LVTMA_CNTL 0x7a80
+# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
+# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
+# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
+# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
+# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16)
+# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24)
+# define AVIVO_LVTMA_CNTL_SWAP (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
+# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
+# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8
+# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
+# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
+#define R500_LVTMA_CLOCK_ENABLE 0x7b00
+#define R600_LVTMA_CLOCK_ENABLE 0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
+# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
+# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL 0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL 0x7af4
+# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0)
+# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2)
+# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3)
+# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4)
+# define AVIVO_LVTMA_SYNCEN (1 << 8)
+# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9)
+# define AVIVO_LVTMA_SYNCEN_POL (1 << 10)
+# define AVIVO_LVTMA_DIGON (1 << 16)
+# define AVIVO_LVTMA_DIGON_OVRD (1 << 17)
+# define AVIVO_LVTMA_DIGON_POL (1 << 18)
+# define AVIVO_LVTMA_BLON (1 << 24)
+# define AVIVO_LVTMA_BLON_OVRD (1 << 25)
+# define AVIVO_LVTMA_BLON_POL (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE 0x7af4
+#define R600_LVTMA_PWRSEQ_STATE 0x7af8
+# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0)
+# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1)
+# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2)
+# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3)
+# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4)
+# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8
+# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0)
+# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00
+# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
+
+#define AVIVO_DC_GPIO_HPD_A 0x7e94
+#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
+
+#define AVIVO_DC_I2C_STATUS1 0x7d30
+# define AVIVO_DC_I2C_DONE (1 << 0)
+# define AVIVO_DC_I2C_NACK (1 << 1)
+# define AVIVO_DC_I2C_HALT (1 << 2)
+# define AVIVO_DC_I2C_GO (1 << 3)
+#define AVIVO_DC_I2C_RESET 0x7d34
+# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
+# define AVIVO_DC_I2C_ABORT (1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 0x7d38
+# define AVIVO_DC_I2C_START (1 << 0)
+# define AVIVO_DC_I2C_STOP (1 << 1)
+# define AVIVO_DC_I2C_RECEIVE (1 << 2)
+# define AVIVO_DC_I2C_EN (1 << 8)
+# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
+# define AVIVO_SEL_DDC1 0
+# define AVIVO_SEL_DDC2 1
+# define AVIVO_SEL_DDC3 2
+#define AVIVO_DC_I2C_CONTROL2 0x7d3c
+# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
+# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 0x7d40
+# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
+# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
+# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
+# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
+# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
+# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
+#define AVIVO_DC_I2C_DATA 0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
+# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
+# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
+# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 0x7d50
+# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
+# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
+# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
+# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
+# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
+# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
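+
+/* Rough transaction flow (a sketch inferred from the bit names above,
+ * not verbatim driver code): claim the engine through the SW
+ * arbitration bits, program the address and data counts in CONTROL2,
+ * select the DDC pin and the START/STOP/RECEIVE flags in CONTROL1,
+ * kick the engine with the GO bit in STATUS1, then poll STATUS1 for
+ * DONE and check NACK.
+ */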
+
+#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
+
+#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
+# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
+# define AVIVO_D2_VBLANK_INTERRUPT (1 << 5)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 0000000..e1aece7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r520d.h"
+
+/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(R520_MC_STATUS);
+ if (tmp & R520_MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+static void r520_gpu_init(struct radeon_device *rdev)
+{
+ unsigned pipe_select_current, gb_pipe_select, tmp;
+
+ rv515_vga_render_disable(rdev);
+ /*
+ * DST_PIPE_CONFIG 0x170C
+ * GB_TILE_CONFIG 0x4018
+ * GB_FIFO_SIZE 0x4024
+ * GB_PIPE_SELECT 0x402C
+ * GB_PIPE_SELECT2 0x4124
+ * Z_PIPE_SHIFT 0
+ * Z_PIPE_MASK 0x000000003
+ * GB_FIFO_SIZE2 0x4128
+ * SC_SFIFO_SIZE_SHIFT 0
+ * SC_SFIFO_SIZE_MASK 0x000000003
+ * SC_MFIFO_SIZE_SHIFT 2
+ * SC_MFIFO_SIZE_MASK 0x00000000C
+ * FG_SFIFO_SIZE_SHIFT 4
+ * FG_SFIFO_SIZE_MASK 0x000000030
+ * ZB_MFIFO_SIZE_SHIFT 6
+ * ZB_MFIFO_SIZE_MASK 0x0000000C0
+ * GA_ENHANCE 0x4274
+ * SU_REG_DEST 0x42C8
+ */
+ /* workaround for RV530 */
+ if (rdev->family == CHIP_RV530) {
+ WREG32(0x4128, 0xFF);
+ }
+ r420_pipes_init(rdev);
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ pipe_select_current = (tmp >> 2) & 3;
+ tmp = (1 << pipe_select_current) |
+ (((gb_pipe_select >> 8) & 0xF) << 4);
+ WREG32_PLL(0x000D, tmp);
+ if (r520_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
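+/* Derive the VRAM bus width from the channel configuration in MC_CNTL0;
+ * e.g. (illustrative) a MEM_NUM_CHANNELS field value of 2 selects a
+ * 128-bit width, doubled to 256 bits when R520_MC_CHANNEL_SIZE is also set.
+ */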
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_width = 128;
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32_MC(R520_MC_CNTL0);
+ switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+ case 0:
+ rdev->mc.vram_width = 32;
+ break;
+ case 1:
+ rdev->mc.vram_width = 64;
+ break;
+ case 2:
+ rdev->mc.vram_width = 128;
+ break;
+ case 3:
+ rdev->mc.vram_width = 256;
+ break;
+ default:
+ rdev->mc.vram_width = 128;
+ break;
+ }
+ if (tmp & R520_MC_CHANNEL_SIZE)
+ rdev->mc.vram_width *= 2;
+}
+
+static void r520_mc_init(struct radeon_device *rdev)
+{
+ r520_vram_get_type(rdev);
+ r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
+static void r520_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+	/* Stop all MC clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (r520_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ /* Write VRAM size in case we are limiting it */
+ WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+ /* Program MC, should be a 32bits limited address space */
+ WREG32_MC(R_000004_MC_FB_LOCATION,
+ S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32_MC(R_000005_MC_AGP_LOCATION,
+ S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+ S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+ WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ WREG32_MC(R_000007_AGP_BASE_2,
+ S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+ } else {
+ WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
+ WREG32_MC(R_000006_AGP_BASE, 0);
+ WREG32_MC(R_000007_AGP_BASE_2, 0);
+ }
+
+ rv515_mc_resume(rdev, &save);
+}
+
+static int r520_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ r520_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ r520_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int r520_resume(struct radeon_device *rdev)
+{
+ int r;
+
+	/* Make sure GART is not working */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = r520_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int r520_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* restore some register to sane defaults */
+ r100_restore_sanity(rdev);
+	/* TODO: disable VGA; need to use VGA request */
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+		dev_err(rdev->dev, "Expecting atombios for R520 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r520_mc_init(rdev);
+ rv515_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ rv515_set_safe_registers(rdev);
+
+ rdev->accel_working = true;
+ r = r520_startup(rdev);
+ if (r) {
+		/* Something went wrong with the accel init, so stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 0000000..61af61f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R520D_H__
+#define __R520D_H__
+
+/* Registers */
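+/* Naming convention used throughout these register headers (a reading aid,
+ * not new definitions):
+ *   S_<reg>_<field>(x)  shift a value into the field position ("set")
+ *   G_<reg>_<field>(x)  extract the field from a register value ("get")
+ *   C_<reg>_<field>     mask with the field bits cleared ("clear")
+ * e.g. G_0007C0_CP_BUSY(RREG32(R_0007C0_CP_STAT)) reads bit 31 of CP_STAT.
+ */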
+#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
+#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0000F8_CONFIG_MEMSIZE 0x00000000
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
+#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
+#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
+#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
+#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
+#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
+#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
+#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
+#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+
+
+#define R_000004_MC_FB_LOCATION 0x000004
+#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000004_MC_FB_START 0xFFFF0000
+#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000004_MC_FB_TOP 0x0000FFFF
+#define R_000005_MC_AGP_LOCATION 0x000005
+#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000005_MC_AGP_START 0xFFFF0000
+#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000005_MC_AGP_TOP 0x0000FFFF
+#define R_000006_AGP_BASE 0x000006
+#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000006_AGP_BASE_ADDR 0x00000000
+#define R_000007_AGP_BASE_2 0x000007
+#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 0000000..f19620b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,4833 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_mode.h"
+#include "r600d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define ARUBA_RLC_UCODE_SIZE 1536
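+/* The sizes above are word counts that the CP/RLC microcode load paths use
+ * to validate and copy the firmware images listed below (noted here as a
+ * reading aid; the loaders themselves appear further down in this file).
+ */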
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
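+/* crtc_offsets[i] is added to a D1 CRTC register offset to address the
+ * same register on CRTC i, e.g.
+ *   AVIVO_D1CRTC_H_TOTAL + crtc_offsets[1] == AVIVO_D2CRTC_H_TOTAL
+ */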
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev);
+
+/* r600,rv610,rv630,rv620,rv635,rv670 */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
+/* get temperature in millidegrees */
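+/* The ASIC_T field is a 9-bit two's-complement value; e.g. (illustrative)
+ * a raw reading of 0x1FF sign-extends to -1 (-1000 millidegrees), while
+ * 0x05A yields 90000.
+ */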
+int rv6xx_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ int actual_temp = temp & 0xff;
+
+ if (temp & 0x100)
+ actual_temp -= 256;
+
+ return actual_temp * 1000;
+}
+
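+/* Pick the requested power state and clock mode for the planned dynpm
+ * action (minimum/downclock/upclock/default). On IGPs and R600 we step
+ * between whole power states; on other parts we keep one power state and
+ * step through its clock modes.
+ */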
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ /* power state array is low to high, default is first */
+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+ int min_power_state_index = 0;
+
+ if (rdev->pm.num_power_states > 2)
+ min_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = min_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == min_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else {
+ if (rdev->pm.current_power_state_index == 0)
+ rdev->pm.requested_power_state_index =
+ rdev->pm.num_power_states - 1;
+ else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+			DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ } else {
+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+ /* for now just select the first power state and switch between clock modes */
+ /* power state array is low to high, default is first (0) */
+ if (rdev->pm.active_crtc_count > 1) {
+ rdev->pm.requested_power_state_index = -1;
+ /* start at 1 as we don't want the default mode */
+ for (i = 1; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ /* if nothing selected, grab the default state. */
+ if (rdev->pm.requested_power_state_index == -1)
+ rdev->pm.requested_power_state_index = 0;
+ } else
+ rdev->pm.requested_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index == 0) {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index - 1;
+ } else {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_clock_mode_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index ==
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index + 1;
+ } else {
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+ rdev->pm.dynpm_can_upclock = false;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+			DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ }
+
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
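+/* Each PM profile entry records which power state (ps_idx) and clock mode
+ * (cm_idx) to use with displays off (dpms_off) and on (dpms_on); the
+ * tables below are filled out per chip according to how many power states
+ * the BIOS exposes.
+ */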
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states == 2) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else if (rdev->pm.num_power_states == 3) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ if (rdev->family == CHIP_R600) {
+ /* XXX */
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ if (rdev->pm.num_power_states < 4) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* mid sh */
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* mid mh */
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ }
+ }
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
+ }
+ }
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
+}
+
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r600_hpd_sense(rdev, hpd);
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+		/* DCE 3.2 */
+		case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned enable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS; this avoids
+			 * breaking the aux dp channel on iMacs and helps (but
+			 * does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 */
+ continue;
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+ if (ASIC_IS_DCE32(rdev))
+ tmp |= DC_HPDx_EN;
+
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ break;
+ default:
+ break;
+ }
+ }
+ enable |= 1 << radeon_connector->hpd.hpd;
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ }
+ radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned disable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ }
+ disable |= 1 << radeon_connector->hpd.hpd;
+ }
+ radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * R600 PCIE GART
+ */
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ /* flush hdp cache so updates hit vram */
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ !(rdev->flags & RADEON_IS_AGP)) {
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ u32 tmp;
+
+		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read,
+		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
+		 * The new method seems to cause problems on some AGP cards,
+		 * so just use the old method for them.
+		 */
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+		/* poll the invalidation response */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
+int r600_pcie_gart_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.robj) {
+ WARN(1, "R600 PCIE GART already initialized\n");
+ return 0;
+ }
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r)
+ return r;
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+ ENABLE_WAIT_L2_QUERY;
+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ for (i = 1; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ r600_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Disable all tables */
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ /* Disable L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+ /* Setup L1 TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+ ENABLE_WAIT_L2_QUERY;
+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ r600_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+}
+
+static void r600_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+ ENABLE_WAIT_L2_QUERY;
+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+		/* check the MC busy bits in SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
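+/* RS780/RS880 MC registers sit behind an index/data pair: the two helpers
+ * below write the MC register number to MC_INDEX (with MC_IND_WR_EN set
+ * for writes) and move the payload through MC_DATA.
+ */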
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0028FC_MC_DATA);
+ WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ S_0028F8_MC_IND_WR_EN(1));
+ WREG32(R_0028FC_MC_DATA, v);
+ WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
+static void r600_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lockout access through VGA aperture (doesn't exist before R600) */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ rv515_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it from overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
+
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same address in the GPU's
+ * address space as it has in the CPU (PCI) address space, as some GPUs
+ * seem to have issues when it is reprogrammed to a different address.
+ *
+ * If there is not enough space to fit the non-visible VRAM after the
+ * aperture, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP, then VRAM is placed adjacent to the AGP aperture,
+ * as we need them to be contiguous from the GPU's point of view so that
+ * we can program the GPU to catch accesses outside of them (weird GPU
+ * policy, see ??).
+ *
+ * This function never fails; the worst case is limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on AGP platforms.
+ */
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = mc->mc_mask - mc->gtt_end;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end + 1;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ u64 base = 0;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
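+
+/* Worked example with illustrative numbers, not taken from the source: with
+ * an AGP aperture at gtt_start = 0x80000000, gtt_end = 0x8FFFFFFF and a
+ * 32-bit mc_mask, size_bf = 2GB and size_af = 1.75GB, so a 1GB card goes
+ * before the aperture: vram_start = 0x80000000 - 0x40000000 = 0x40000000
+ * and vram_end = 0x7FFFFFFF, leaving VRAM and GTT contiguous.
+ */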
+
+static int r600_mc_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int chansize, numchan;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ /* Setup GPU memory space */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r600_vram_gtt_location(rdev, &rdev->mc);
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rs690_pm_info(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB should only be used with UMA memory; it is simply disabled
+ * here when sideport memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+ }
+ }
+
+ radeon_update_bandwidth_info(rdev);
+ return 0;
+}
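+
+/* Worked example with illustrative values: if RAMCFG reports a 32-bit
+ * channel (chansize = 32) and the CHMAP NOOFCHAN field reads 2 (numchan =
+ * 4), the code above computes vram_width = 4 * 32 = 128 bits.
+ */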
+
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
+{
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
+
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
+ return reset_mask;
+}
+
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct rv515_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ r600_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+ S_008020_SOFT_RESET_VGT(1);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ mdelay(1);
+
+ rv515_mc_resume(rdev, &save);
+ udelay(50);
+
+ r600_print_gpu_status_regs(rdev);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 disabled_rb_mask)
+{
+ u32 rendering_pipe_num, rb_num_width, req_rb_num;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
+ u32 data = 0, mask = 1 << (max_rb_num - 1);
+ unsigned i, j;
+
+ /* mask out the RBs that don't exist on that asic */
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
+
+ rendering_pipe_num = 1 << tiling_pipe_num;
+ req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+ BUG_ON(rendering_pipe_num < req_rb_num);
+
+ pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+ pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+ if (rdev->family <= CHIP_RV740) {
+ /* r6xx/r7xx */
+ rb_num_width = 2;
+ } else {
+ /* eg+ */
+ rb_num_width = 4;
+ }
+
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(mask & disabled_rb_mask)) {
+ for (j = 0; j < pipe_rb_ratio; j++) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ }
+ if (pipe_rb_remain) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ pipe_rb_remain--;
+ }
+ }
+ mask >>= 1;
+ }
+
+ return data;
+}
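+
+/* Worked example with illustrative values: for 4 pipes (tiling_pipe_num =
+ * 2), max_rb_num = 4, total_max_rb_num = 8 and RB0 disabled
+ * (disabled_rb_mask = 0x1), the existing-RB mask becomes 0xf1, req_rb_num =
+ * 3 and pipe_rb_ratio = 1 with one pipe remaining; the loop then packs the
+ * 2-bit backend ids 3, 3, 2, 1 into data = 0xF9, so the spare pipe is
+ * absorbed by the highest remaining backend.
+ */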
+
+int r600_count_pipe_bits(uint32_t val)
+{
+ return hweight32(val);
+}
+
+static void r600_gpu_init(struct radeon_device *rdev)
+{
+ u32 tiling_config;
+ u32 ramcfg;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
+ u32 tmp;
+ int i, j;
+ u32 sq_config;
+ u32 sq_gpr_resource_mgmt_1 = 0;
+ u32 sq_gpr_resource_mgmt_2 = 0;
+ u32 sq_thread_resource_mgmt = 0;
+ u32 sq_stack_resource_mgmt_1 = 0;
+ u32 sq_stack_resource_mgmt_2 = 0;
+ u32 disabled_rb_mask;
+
+ rdev->config.r600.tiling_group_size = 256;
+ switch (rdev->family) {
+ case CHIP_R600:
+ rdev->config.r600.max_pipes = 4;
+ rdev->config.r600.max_tile_pipes = 8;
+ rdev->config.r600.max_simds = 4;
+ rdev->config.r600.max_backends = 4;
+ rdev->config.r600.max_gprs = 256;
+ rdev->config.r600.max_threads = 192;
+ rdev->config.r600.max_stack_entries = 256;
+ rdev->config.r600.max_hw_contexts = 8;
+ rdev->config.r600.max_gs_threads = 16;
+ rdev->config.r600.sx_max_export_size = 128;
+ rdev->config.r600.sx_max_export_pos_size = 16;
+ rdev->config.r600.sx_max_export_smx_size = 128;
+ rdev->config.r600.sq_num_cf_insts = 2;
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ rdev->config.r600.max_pipes = 2;
+ rdev->config.r600.max_tile_pipes = 2;
+ rdev->config.r600.max_simds = 3;
+ rdev->config.r600.max_backends = 1;
+ rdev->config.r600.max_gprs = 128;
+ rdev->config.r600.max_threads = 192;
+ rdev->config.r600.max_stack_entries = 128;
+ rdev->config.r600.max_hw_contexts = 8;
+ rdev->config.r600.max_gs_threads = 4;
+ rdev->config.r600.sx_max_export_size = 128;
+ rdev->config.r600.sx_max_export_pos_size = 16;
+ rdev->config.r600.sx_max_export_smx_size = 128;
+ rdev->config.r600.sq_num_cf_insts = 2;
+ break;
+ case CHIP_RV610:
+ case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ rdev->config.r600.max_pipes = 1;
+ rdev->config.r600.max_tile_pipes = 1;
+ rdev->config.r600.max_simds = 2;
+ rdev->config.r600.max_backends = 1;
+ rdev->config.r600.max_gprs = 128;
+ rdev->config.r600.max_threads = 192;
+ rdev->config.r600.max_stack_entries = 128;
+ rdev->config.r600.max_hw_contexts = 4;
+ rdev->config.r600.max_gs_threads = 4;
+ rdev->config.r600.sx_max_export_size = 128;
+ rdev->config.r600.sx_max_export_pos_size = 16;
+ rdev->config.r600.sx_max_export_smx_size = 128;
+ rdev->config.r600.sq_num_cf_insts = 1;
+ break;
+ case CHIP_RV670:
+ rdev->config.r600.max_pipes = 4;
+ rdev->config.r600.max_tile_pipes = 4;
+ rdev->config.r600.max_simds = 4;
+ rdev->config.r600.max_backends = 4;
+ rdev->config.r600.max_gprs = 192;
+ rdev->config.r600.max_threads = 192;
+ rdev->config.r600.max_stack_entries = 256;
+ rdev->config.r600.max_hw_contexts = 8;
+ rdev->config.r600.max_gs_threads = 16;
+ rdev->config.r600.sx_max_export_size = 128;
+ rdev->config.r600.sx_max_export_pos_size = 16;
+ rdev->config.r600.sx_max_export_smx_size = 128;
+ rdev->config.r600.sq_num_cf_insts = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ /* Setup tiling */
+ tiling_config = 0;
+ ramcfg = RREG32(RAMCFG);
+ switch (rdev->config.r600.max_tile_pipes) {
+ case 1:
+ tiling_config |= PIPE_TILING(0);
+ break;
+ case 2:
+ tiling_config |= PIPE_TILING(1);
+ break;
+ case 4:
+ tiling_config |= PIPE_TILING(2);
+ break;
+ case 8:
+ tiling_config |= PIPE_TILING(3);
+ break;
+ default:
+ break;
+ }
+ rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+
+ tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ if (tmp > 3) {
+ tiling_config |= ROW_TILING(3);
+ tiling_config |= SAMPLE_SPLIT(3);
+ } else {
+ tiling_config |= ROW_TILING(tmp);
+ tiling_config |= SAMPLE_SPLIT(tmp);
+ }
+ tiling_config |= BANK_SWAPS(1);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+ if (tmp < rdev->config.r600.max_backends) {
+ rdev->config.r600.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+ tmp = R6XX_MAX_PIPES -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.r600.max_pipes) {
+ rdev->config.r600.max_pipes = tmp;
+ }
+ tmp = R6XX_MAX_SIMDS -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.r600.max_simds) {
+ rdev->config.r600.max_simds = tmp;
+ }
+
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+ tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+ R6XX_MAX_BACKENDS, disabled_rb_mask);
+ tiling_config |= tmp << 16;
+ rdev->config.r600.backend_map = tmp;
+
+ rdev->config.r600.tile_config = tiling_config;
+ WREG32(GB_TILING_CONFIG, tiling_config);
+ WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+ WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
+
+ tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+ WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+ /* Setup some CP states */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
+ WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
+ SYNC_WALKER | SYNC_ALIGNER));
+ /* Setup various GPU states */
+ if (rdev->family == CHIP_RV670)
+ WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
+
+ tmp = RREG32(SX_DEBUG_1);
+ tmp |= SMX_EVENT_RELEASE;
+ if (rdev->family > CHIP_R600)
+ tmp |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, tmp);
+
+ if (((rdev->family) == CHIP_R600) ||
+ ((rdev->family) == CHIP_RV630) ||
+ ((rdev->family) == CHIP_RV610) ||
+ ((rdev->family) == CHIP_RV620) ||
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
+ WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+ } else {
+ WREG32(DB_DEBUG, 0);
+ }
+ WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
+ DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
+
+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+ WREG32(VGT_NUM_INSTANCES, 0);
+
+ WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
+
+ tmp = RREG32(SQ_MS_FIFO_SIZES);
+ if (((rdev->family) == CHIP_RV610) ||
+ ((rdev->family) == CHIP_RV620) ||
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
+ tmp = (CACHE_FIFO_SIZE(0xa) |
+ FETCH_FIFO_HIWATER(0xa) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8));
+ } else if (((rdev->family) == CHIP_R600) ||
+ ((rdev->family) == CHIP_RV630)) {
+ tmp &= ~DONE_FIFO_HIWATER(0xff);
+ tmp |= DONE_FIFO_HIWATER(0x4);
+ }
+ WREG32(SQ_MS_FIFO_SIZES, tmp);
+
+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
+ */
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (DX9_CONSTS |
+ VC_ENABLE |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if ((rdev->family) == CHIP_R600) {
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
+ NUM_VS_GPRS(124) |
+ NUM_CLAUSE_TEMP_GPRS(4));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
+ NUM_ES_GPRS(0));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
+ NUM_VS_THREADS(48) |
+ NUM_GS_THREADS(4) |
+ NUM_ES_THREADS(4));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
+ NUM_VS_STACK_ENTRIES(128));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
+ NUM_ES_STACK_ENTRIES(0));
+ } else if (((rdev->family) == CHIP_RV610) ||
+ ((rdev->family) == CHIP_RV620) ||
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+ NUM_VS_GPRS(44) |
+ NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+ NUM_ES_GPRS(17));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+ NUM_VS_THREADS(78) |
+ NUM_GS_THREADS(4) |
+ NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+ NUM_VS_STACK_ENTRIES(40));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+ NUM_ES_STACK_ENTRIES(16));
+ } else if (((rdev->family) == CHIP_RV630) ||
+ ((rdev->family) == CHIP_RV635)) {
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+ NUM_VS_GPRS(44) |
+ NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
+ NUM_ES_GPRS(18));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+ NUM_VS_THREADS(78) |
+ NUM_GS_THREADS(4) |
+ NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+ NUM_VS_STACK_ENTRIES(40));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+ NUM_ES_STACK_ENTRIES(16));
+ } else if ((rdev->family) == CHIP_RV670) {
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+ NUM_VS_GPRS(44) |
+ NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+ NUM_ES_GPRS(17));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+ NUM_VS_THREADS(78) |
+ NUM_GS_THREADS(4) |
+ NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
+ NUM_VS_STACK_ENTRIES(64));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
+ NUM_ES_STACK_ENTRIES(64));
+ }
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+ if (((rdev->family) == CHIP_RV610) ||
+ ((rdev->family) == CHIP_RV620) ||
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
+ } else {
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
+ }
+
+ /* More default values. 2D/3D driver should adjust as needed */
+ WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
+ S1_X(0x4) | S1_Y(0xc)));
+ WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
+ S1_X(0x2) | S1_Y(0x2) |
+ S2_X(0xa) | S2_Y(0x6) |
+ S3_X(0x6) | S3_Y(0xa)));
+ WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
+ S1_X(0x4) | S1_Y(0xc) |
+ S2_X(0x1) | S2_Y(0x6) |
+ S3_X(0xa) | S3_Y(0xe)));
+ WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
+ S5_X(0x0) | S5_Y(0x0) |
+ S6_X(0xb) | S6_Y(0x4) |
+ S7_X(0x7) | S7_Y(0x8)));
+
+ WREG32(VGT_STRMOUT_EN, 0);
+ tmp = rdev->config.r600.max_pipes * 16;
+ switch (rdev->family) {
+ case CHIP_RV610:
+ case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ tmp += 32;
+ break;
+ case CHIP_RV670:
+ tmp += 128;
+ break;
+ default:
+ break;
+ }
+ if (tmp > 256) {
+ tmp = 256;
+ }
+ WREG32(VGT_ES_PER_GS, 128);
+ WREG32(VGT_GS_PER_ES, tmp);
+ WREG32(VGT_GS_PER_VS, 2);
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+
+ /* more default values. 2D/3D driver should adjust as needed */
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+ WREG32(VGT_STRMOUT_EN, 0);
+ WREG32(SX_MISC, 0);
+ WREG32(PA_SC_MODE_CNTL, 0);
+ WREG32(PA_SC_AA_CONFIG, 0);
+ WREG32(PA_SC_LINE_STIPPLE, 0);
+ WREG32(SPI_INPUT_Z, 0);
+ WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+ WREG32(CB_COLOR7_FRAG, 0);
+
+ /* Clear render buffer base addresses */
+ WREG32(CB_COLOR0_BASE, 0);
+ WREG32(CB_COLOR1_BASE, 0);
+ WREG32(CB_COLOR2_BASE, 0);
+ WREG32(CB_COLOR3_BASE, 0);
+ WREG32(CB_COLOR4_BASE, 0);
+ WREG32(CB_COLOR5_BASE, 0);
+ WREG32(CB_COLOR6_BASE, 0);
+ WREG32(CB_COLOR7_BASE, 0);
+ WREG32(CB_COLOR7_FRAG, 0);
+
+ switch (rdev->family) {
+ case CHIP_RV610:
+ case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ tmp = TC_L2_SIZE(8);
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ tmp = TC_L2_SIZE(4);
+ break;
+ case CHIP_R600:
+ tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
+ break;
+ default:
+ tmp = TC_L2_SIZE(0);
+ break;
+ }
+ WREG32(TC_CNTL, tmp);
+
+ tmp = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, tmp);
+
+ tmp = RREG32(ARB_POP);
+ tmp |= ENABLE_TC128;
+ WREG32(ARB_POP, tmp);
+
+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+ NUM_CLIP_SEQ(3)));
+ WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+ WREG32(VC_ENHANCE, 0);
+}
+
+
+/*
+ * Indirect register accessors
+ */
+u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
+{
+ u32 r;
+
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ r = RREG32(PCIE_PORT_DATA);
+ return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ WREG32(PCIE_PORT_DATA, (v));
+ (void)RREG32(PCIE_PORT_DATA);
+}
+
+/*
+ * CP & Ring
+ */
+void r600_cp_stop(struct radeon_device *rdev)
+{
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+int r600_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_R600:
+ chip_name = "R600";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV610:
+ chip_name = "RV610";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV630:
+ chip_name = "RV630";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV620:
+ chip_name = "RV620";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV635:
+ chip_name = "RV635";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV670:
+ chip_name = "RV670";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RS780:
+ case CHIP_RS880:
+ chip_name = "RS780";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV770:
+ chip_name = "RV770";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV730:
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV710:
+ chip_name = "RV710";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
+ case CHIP_PALM:
+ chip_name = "PALM";
+ rlc_chip_name = "SUMO";
+ break;
+ case CHIP_SUMO:
+ chip_name = "SUMO";
+ rlc_chip_name = "SUMO";
+ break;
+ case CHIP_SUMO2:
+ chip_name = "SUMO2";
+ rlc_chip_name = "SUMO";
+ break;
+ default: BUG();
+ }
+
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
+ pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+ me_req_size = R700_PM4_UCODE_SIZE * 4;
+ rlc_req_size = R700_RLC_UCODE_SIZE * 4;
+ } else {
+ pfp_req_size = PFP_UCODE_SIZE * 4;
+ me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
+ }
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "r600_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ }
+ return err;
+}
+
+static int r600_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r600_cp_stop(rdev);
+
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+ /* Reset cp */
+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+
+ WREG32(CP_ME_RAM_WADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+ WREG32(CP_ME_RAM_DATA,
+ be32_to_cpup(fw_data++));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA,
+ be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+int r600_cp_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+ uint32_t cp_me;
+
+ r = radeon_ring_lock(rdev, ring, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ if (rdev->family >= CHIP_RV770) {
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(ring, 0x3);
+ radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+ }
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ cp_me = 0xff;
+ WREG32(R_0086D8_CP_ME_CNTL, cp_me);
+ return 0;
+}
+
+int r600_cp_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp */
+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB_RPTR_ADDR,
+ ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ ring->rptr = RREG32(CP_RB_RPTR);
+
+ r600_cp_start(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ return 0;
+}
+
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+{
+ u32 rb_bufsz;
+ int r;
+
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 8);
+ ring_size = (1 << (rb_bufsz + 1)) * 4;
+ ring->ring_size = ring_size;
+ ring->align_mask = 16 - 1;
+
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
+}
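+
+/* Worked example with illustrative values: drm_order() returns the smallest
+ * n with (1 << n) >= its argument, so a requested ring_size of 96KB gives
+ * rb_bufsz = drm_order(96K / 8) = 14 and a final ring_size of (1 << 15) * 4
+ * = 128KB; requests are rounded up to the next size the hardware ring
+ * buffer size field can express.
+ */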
+
+void r600_cp_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r600_cp_stop(rdev);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
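+/* As a rough sketch of the packet format described above, assuming the
+ * DMA_PACKET() encoding from r600d.h (a 4-bit command in the top bits plus
+ * tiling/semaphore flags and a 16-bit count in the low bits), a one-dword
+ * write is built as:
+ *
+ *	DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)
+ *
+ * followed by the destination address and payload dwords, exactly as
+ * r600_dma_ring_test() below does.
+ */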
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/*
+ * UVD
+ */
+int r600_uvd_rbc_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint64_t rptr_addr;
+ uint32_t rb_bufsz, tmp;
+ int r;
+
+ rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
+
+ if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
+ DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
+ return -EINVAL;
+ }
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ return r;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ return 0;
+}
+
+void r600_uvd_stop(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ ring->ready = false;
+}
+
+int r600_uvd_init(struct radeon_device *rdev)
+{
+ int i, j, r;
+ /* disable byte swapping */
+ u32 lmi_swap_cntl = 0;
+ u32 mp_swap_cntl = 0;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupts */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+ mp_swap_cntl = 0;
+#endif
+ WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+ WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+ }
+
+ /* enable interrupts */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ r = r600_uvd_rbc_start(rdev);
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ return r;
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+void r600_scratch_init(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->scratch.num_reg = 7;
+ rdev->scratch.reg_base = SCRATCH_REG0;
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ rdev->scratch.free[i] = true;
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+ }
+}
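+
+/* A typical consumer follows the pattern in r600_ring_test() below: grab a
+ * register with radeon_scratch_get(), write a sentinel value, have the ring
+ * overwrite it through a SET_CONFIG_REG packet, poll for the new value and
+ * finally release the register with radeon_scratch_free().
+ */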
+
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
+ radeon_scratch_free(rdev, scratch);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ring->idx, scratch, tmp);
+ r = -EINVAL;
+ }
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value to
+ * memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
+void r600_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+ if (rdev->wb.use_event) {
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* flush read cache over gart */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+ PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+ } else {
+ /* flush read cache over gart */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+ PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ /* wait for 3D idle clean */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ /* Emit fence sequence & fire IRQ */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(ring, RB_INT_STAT);
+ }
+}
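+
+/* In the EVENT_WRITE_EOP path above, DATA_SEL(1) selects a 32-bit data
+ * write (the fence sequence number emitted in the following dword) and
+ * INT_SEL(2) requests an interrupt once that write has completed, assuming
+ * the usual r6xx EOP field encoding.
+ */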
+
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ if (rdev->family < CHIP_CAYMAN)
+ sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, and a DMA trap packet to generate an
+ * interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or
+ * signal other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
+int r600_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ struct radeon_sa_bo *vb = NULL;
+ int r;
+
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
+ if (r) {
+ return r;
+ }
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb, sem);
+ return 0;
+}
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
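+
+/* A hedged usage sketch of the callback above; the offsets and size are
+ * made-up placeholders, and the explicit wait is optional (ttm can also
+ * queue the fence):
+ *
+ *   struct radeon_fence *fence = NULL;
+ *   int r = r600_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
+ *                         size >> RADEON_GPU_PAGE_SHIFT, &fence);
+ *   if (!r)
+ *           r = radeon_fence_wait(fence, false);
+ *   radeon_fence_unref(&fence);
+ */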
+
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size)
+{
+ /* FIXME: implement */
+ return 0;
+}
+
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+ /* FIXME: implement */
+}
+
+static int r600_startup(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
+ r600_mc_program(rdev);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+ r = r600_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ r600_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ r = r600_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = r600_cp_resume(rdev);
+ if (r)
+ return r;
+
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+void r600_vga_set_state(struct radeon_device *rdev, bool state)
+{
+ uint32_t temp;
+
+ temp = RREG32(CONFIG_CNTL);
+ if (!state) {
+ temp &= ~(1<<0);
+ temp |= (1<<1);
+ } else {
+ temp &= ~(1<<1);
+ }
+ WREG32(CONFIG_CNTL, temp);
+}
+
+int r600_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on r600 hardware, unlike r500,
+ * posting will perform the tasks necessary to bring the GPU back into
+ * good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ rdev->accel_working = true;
+ r = r600_startup(rdev);
+ if (r) {
+ DRM_ERROR("r600 startup failed on resume\n");
+ rdev->accel_working = false;
+ return r;
+ }
+
+ return r;
+}
+
+int r600_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r600_cp_stop(rdev);
+ r600_dma_stop(rdev);
+ r600_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ r600_pcie_gart_disable(rdev);
+
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use helper
+ * functions so that radeon_device_init does little more than call
+ * asic-specific functions. This should also allow us to remove a bunch
+ * of callbacks like vram_info.
+ */
+int r600_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (r600_debugfs_mc_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for mc !\n");
+ }
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ r = r600_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = r600_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ r600_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+
+ return 0;
+}
+
+void r600_fini(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ r600_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+
+/*
+ * CS stuff
+ */
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
+
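+ /* next_rptr = wptr after the save packet below (3 DW reg write or
+ * 5 DW mem write) plus the 4 DW INDIRECT_BUFFER packet at the end */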
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = radeon_scratch_get(rdev, &scratch);
+ if (r) {
+ DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ goto free_scratch;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ goto free_ib;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto free_ib;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+free_ib:
+ radeon_ib_free(rdev, &ib);
+free_scratch:
+ radeon_scratch_free(rdev, scratch);
+ return r;
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad with NOPs until wptr % 8 == 5 so the 3 DW packet below ends on
+ * a multiple of 8.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
+ * the same as the CP ring buffer, but in reverse. Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and host consumes. As the host irq handler processes interrupts, it
+ * increments the rptr. When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
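+
+/* In sketch form, the consumption model described above (the real loop
+ * with full vector decoding is r600_irq_process() further below;
+ * handle_vector is a made-up placeholder):
+ *
+ *   wptr = r600_get_ih_wptr(rdev);             // where the GPU last wrote
+ *   while (rptr != wptr) {
+ *           handle_vector(&rdev->ih.ring[rptr / 4]);
+ *           rptr = (rptr + 16) & rdev->ih.ptr_mask;  // 16 bytes per vector
+ *   }
+ *   WREG32(IH_RB_RPTR, rptr);                  // tell the GPU we caught up
+ */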
+
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ u32 rb_bufsz;
+
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+ rdev->ih.ring_size = ring_size;
+ rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+ rdev->ih.rptr = 0;
+}
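+
+/* Worked example: for the 64 KB request from r600_init(),
+ * drm_order(65536 / 4) = 14, so ring_size remains (1 << 14) * 4 = 65536
+ * and ptr_mask becomes 0xffff; non-power-of-two requests are rounded up
+ * so the mask arithmetic in r600_get_ih_wptr() stays valid.
+ */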
+
+int r600_ih_ring_alloc(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Allocate ring buffer */
+ if (rdev->ih.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->ih.ring_size,
+ PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ih.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ih.ring_obj,
+ (void **)&rdev->ih.ring);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ return 0;
+}
+
+void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+ int r;
+ if (rdev->ih.ring_obj) {
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ih.ring_obj);
+ radeon_bo_unpin(rdev->ih.ring_obj);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ }
+ radeon_bo_unref(&rdev->ih.ring_obj);
+ rdev->ih.ring = NULL;
+ rdev->ih.ring_obj = NULL;
+ }
+}
+
+void r600_rlc_stop(struct radeon_device *rdev)
+{
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
+ /* r7xx asics need to soft reset RLC before halting */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ RREG32(SRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+ }
+
+ WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ r600_rlc_stop(rdev);
+
+ WREG32(RLC_HB_CNTL, 0);
+
+ if (rdev->family == CHIP_ARUBA) {
+ WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ }
+ if (rdev->family <= CHIP_CAYMAN) {
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ }
+ if (rdev->family <= CHIP_CAICOS) {
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ }
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->family >= CHIP_ARUBA) {
+ for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CAYMAN) {
+ for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
+ for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ r600_rlc_start(rdev);
+
+ return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+void r600_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL, tmp);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(DxMODE_INT_MASK, 0);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ if (ASIC_IS_DCE32(rdev)) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+ } else {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ r600_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = r600_rlc_init(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+
+ if (rdev->wb.enabled)
+ ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+ /* set the writeback address whether it's enabled or not */
+ WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
+
+ /* at this point everything should be setup correctly to enable master */
+ pci_set_master(rdev->pdev);
+
+ /* enable irqs */
+ r600_enable_interrupts(rdev);
+
+ return ret;
+}
+
+void r600_irq_suspend(struct radeon_device *rdev)
+{
+ r600_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_irq_suspend(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 mode_int = 0;
+ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi0, hdmi1;
+ u32 d1grph = 0, d2grph = 0;
+ u32 dma_cntl;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ if (ASIC_IS_DCE3(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ if (ASIC_IS_DCE32(rdev)) {
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ } else {
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ }
+ } else {
+ hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ }
+ dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("r600_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
+ DRM_DEBUG("r600_irq_set: vblank 0\n");
+ mode_int |= D1MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
+ DRM_DEBUG("r600_irq_set: vblank 1\n");
+ mode_int |= D2MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("r600_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("r600_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("r600_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("r600_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("r600_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("r600_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DMA_CNTL, dma_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+ }
+ } else {
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+ }
+
+ return 0;
+}
+
+static void r600_irq_ack(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ if (ASIC_IS_DCE32(rdev)) {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+ } else {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+ }
+ } else {
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
+ }
+ rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+ if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
+ WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
+ WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ }
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
+ }
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+ }
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (ASIC_IS_DCE32(rdev)) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+ }
+ } else {
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ } else {
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
+ }
+ }
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ r600_irq_ack(rdev);
+ r600_disable_interrupt_state(rdev);
+}
+
+static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ if (rdev->wb.enabled)
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+ else
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last not-yet-overwritten vector (wptr + 16). Hopefully
+ * this allows us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id src_data description
+ * 1 0 D1 Vblank
+ * 1 1 D1 Vline
+ * 5 0 D2 Vblank
+ * 5 1 D2 Vline
+ * 19 0 FP Hot plug detection A
+ * 19 1 FP Hot plug detection B
+ * 19 2 DAC A auto-detection
+ * 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
+ * 176 - CP_INT RB
+ * 177 - CP_INT IB1
+ * 178 - CP_INT IB2
+ * 181 - EOP Interrupt
+ * 233 - GUI Idle
+ *
+ * Note: these are based on r600 and may need to be
+ * adjusted or extended for newer asics
+ */
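+
+/* Decoding sketch for one 16-byte vector (this mirrors what the loop in
+ * r600_irq_process() below actually does on raw dwords):
+ *
+ *   u32 dw0 = le32_to_cpu(rdev->ih.ring[ring_index + 0]);
+ *   u32 dw1 = le32_to_cpu(rdev->ih.ring[ring_index + 1]);
+ *   u32 src_id   = dw0 & 0xff;       // bits [7:0]
+ *   u32 src_data = dw1 & 0xfffffff;  // bits [59:32]
+ */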
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr;
+ u32 rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ bool queue_hotplug = false;
+ bool queue_hdmi = false;
+
+ if (!rdev->ih.enabled || rdev->shutdown)
+ return IRQ_NONE;
+
+ /* No MSIs, need a dummy read to flush PCI DMAs */
+ if (!rdev->msi_enabled)
+ RREG32(IH_RB_WPTR);
+
+ wptr = r600_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
+ /* display interrupts */
+ r600_irq_ack(rdev);
+
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+ src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 10:
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 12:
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 21: /* hdmi */
+ switch (src_data) {
+ case 4:
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ if (queue_hotplug)
+ schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
+ DREG32_SYS(m, rdev, VM_L2_STATUS);
+ return 0;
+}
+
+static struct drm_info_list r600_mc_info_list[] = {
+ {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
+#else
+ return 0;
+#endif
+}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX chips don't seem to take into account the HDP flush
+ * performed through the ring buffer, which leads to rendering corruption;
+ * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
+ * perform the HDP flush directly by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+ /* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
+ * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
+ * This workaround seems to cause problems on some AGP cards,
+ * so just use the old method for them.
+ */
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ u32 link_width_cntl, mask;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ radeon_gui_idle(rdev);
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ /* not actually supported */
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+ return;
+ }
+
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+ WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return 0;
+
+ radeon_gui_idle(rdev);
+
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+ u16 link_cntl2;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* only RV6xx+ chips are supported */
+ if (rdev->family <= CHIP_R600)
+ return;
+
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if (speed_cntl & LC_CURRENT_DATA_RATE) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+ }
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ WREG32(MM_CFGREGS_CNTL, 0x8);
+ link_cntl2 = RREG32(0x4088);
+ WREG32(MM_CFGREGS_CNTL, 0);
+ /* not supported yet */
+ if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+ return;
+ }
+
+ speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+ speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+ speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+ speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
+ training_cntl &= ~LC_POINT_7_PLUS_EN;
+ WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
+ } else {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
+
+/**
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
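+
+/* A hedged profiling sketch built on the helper above; it assumes sclk is
+ * fixed for the measured interval (radeon clock values are in 10 kHz units):
+ *
+ *   uint64_t t0 = r600_get_gpu_clock_counter(rdev);
+ *   ...submit and wait for work...
+ *   uint64_t cycles = r600_get_gpu_clock_counter(rdev) - t0;
+ *   // elapsed_us = cycles * 100 / rdev->clock.default_sclk
+ */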
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 0000000..c92eb86
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ }
+ return false;
+}
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
+}
+
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
+{
+ struct r600_audio status;
+ uint32_t value;
+
+ value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+
+ /* number of channels */
+ status.channels = (value & 0x7) + 1;
+
+ /* bits per sample */
+ switch ((value & 0xF0) >> 4) {
+ case 0x0:
+ status.bits_per_sample = 8;
+ break;
+ case 0x1:
+ status.bits_per_sample = 16;
+ break;
+ case 0x2:
+ status.bits_per_sample = 20;
+ break;
+ case 0x3:
+ status.bits_per_sample = 24;
+ break;
+ case 0x4:
+ status.bits_per_sample = 32;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+ (int)value);
+ status.bits_per_sample = 16;
+ }
+
+ /* current sampling rate in Hz */
+ if (value & 0x4000)
+ status.rate = 44100;
+ else
+ status.rate = 48000;
+ status.rate *= ((value >> 11) & 0x7) + 1;
+ status.rate /= ((value >> 8) & 0x7) + 1;
+
+ value = RREG32(R600_AUDIO_STATUS_BITS);
+
+ /* iec 60958 status bits */
+ status.status_bits = value & 0xff;
+
+ /* iec 60958 category code */
+ status.category_code = (value >> 8) & 0xff;
+
+ return status;
+}
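+
+/* Worked example of the rate decode above: with bit 14 set (44.1 kHz base),
+ * a multiplier field of 1 and a divider field of 0, the result is
+ * 44100 * (1 + 1) / (0 + 1) = 88200 Hz.
+ */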
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+void r600_audio_update_hdmi(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ audio_work);
+ struct drm_device *dev = rdev->ddev;
+ struct r600_audio audio_status = r600_audio_status(rdev);
+ struct drm_encoder *encoder;
+ bool changed = false;
+
+ if (rdev->audio_status.channels != audio_status.channels ||
+ rdev->audio_status.rate != audio_status.rate ||
+ rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio_status.status_bits != audio_status.status_bits ||
+ rdev->audio_status.category_code != audio_status.category_code) {
+ rdev->audio_status = audio_status;
+ changed = true;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!radeon_dig_encoder(encoder))
+ continue;
+ if (changed || r600_hdmi_buffer_status_changed(encoder))
+ r600_hdmi_update_audio_settings(encoder);
+ }
+}
+
+/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+ u32 value = 0;
+ DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+ if (ASIC_IS_DCE4(rdev)) {
+ if (enable) {
+ value |= 0x81000000; /* Required to enable audio */
+ value |= 0x0e1000f0; /* fglrx sets that too */
+ }
+ WREG32(EVERGREEN_AUDIO_ENABLE, value);
+ } else {
+ WREG32_P(R600_AUDIO_ENABLE,
+ enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
+ rdev->audio_enabled = enable;
+}
+
+/*
+ * initialize the audio vars
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+ return 0;
+
+ r600_audio_engine_enable(rdev, true);
+
+ rdev->audio_status.channels = -1;
+ rdev->audio_status.rate = -1;
+ rdev->audio_status.bits_per_sample = -1;
+ rdev->audio_status.status_bits = 0;
+ rdev->audio_status.category_code = 0;
+
+ return 0;
+}
+
+/*
+ * shut down the audio engine
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+ if (!rdev->audio_enabled)
+ return;
+
+ r600_audio_engine_enable(rdev, false);
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
new file mode 100644
index 0000000..f651881
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -0,0 +1,843 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include "r600_blit_shaders.h"
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
+
+static void
+set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
+{
+ u32 cb_color_info;
+ int pitch, slice;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ h = ALIGN(h, 8);
+ if (h < 8)
+ h = 8;
+
+ cb_color_info = ((format << 2) | (1 << 27));
+ pitch = (w / 8) - 1;
+ slice = ((w * h) / 64) - 1;
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
+ ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
+ BEGIN_RING(21 + 2);
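+ /* 2 extra DW for the SURFACE_BASE_UPDATE packet needed on rv6xx */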
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(gpu_addr >> 8);
+ OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
+ OUT_RING(2 << 0);
+ } else {
+ BEGIN_RING(21);
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(gpu_addr >> 8);
+ }
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING((pitch << 0) | (slice << 10));
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(cb_color_info);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+
+ ADVANCE_RING();
+}
+
+static void
+cp_set_surface_sync(drm_radeon_private_t *dev_priv,
+ u32 sync_type, u32 size, u64 mc_addr)
+{
+ u32 cp_coher_size;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ if (size == 0xffffffff)
+ cp_coher_size = 0xffffffff;
+ else
+ cp_coher_size = ((size + 255) >> 8);
+
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
+ OUT_RING(sync_type);
+ OUT_RING(cp_coher_size);
+ OUT_RING((mc_addr >> 8));
+ OUT_RING(10); /* poll interval */
+ ADVANCE_RING();
+}
+
+static void
+set_shaders(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u64 gpu_addr;
+ int i;
+ u32 *vs, *ps;
+ uint32_t sq_pgm_resources;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ /* load shaders */
+ vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
+ ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+
+ for (i = 0; i < r6xx_vs_size; i++)
+ vs[i] = cpu_to_le32(r6xx_vs[i]);
+ for (i = 0; i < r6xx_ps_size; i++)
+ ps[i] = cpu_to_le32(r6xx_ps[i]);
+
+ dev_priv->blit_vb->used = 512;
+
+ gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
+
+ /* setup shader regs */
+ sq_pgm_resources = (1 << 0);
+
+ BEGIN_RING(9 + 12);
+ /* VS */
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(gpu_addr >> 8);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(sq_pgm_resources);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+
+ /* PS */
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING((gpu_addr + 256) >> 8);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(sq_pgm_resources | (1 << 28));
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(2);
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+ OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING(0);
+ ADVANCE_RING();
+
+ cp_set_surface_sync(dev_priv,
+ R600_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+static void
+set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
+{
+ uint32_t sq_vtx_constant_word2;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= (2 << 30);
+#endif
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+ OUT_RING(0x460);
+ OUT_RING(gpu_addr & 0xffffffff);
+ OUT_RING(48 - 1);
+ OUT_RING(sq_vtx_constant_word2);
+ OUT_RING(1 << 0);
+ OUT_RING(0);
+ OUT_RING(0);
+ OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
+ ADVANCE_RING();
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+ cp_set_surface_sync(dev_priv,
+ R600_TC_ACTION_ENA, 48, gpu_addr);
+ else
+ cp_set_surface_sync(dev_priv,
+ R600_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+static void
+set_tex_resource(drm_radeon_private_t *dev_priv,
+ int format, int w, int h, int pitch, u64 gpu_addr)
+{
+ uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ if (h < 1)
+ h = 1;
+
+ sq_tex_resource_word0 = (1 << 0);
+ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
+ ((w - 1) << 19));
+
+ sq_tex_resource_word1 = (format << 26);
+ sq_tex_resource_word1 |= ((h - 1) << 0);
+
+ sq_tex_resource_word4 = ((1 << 14) |
+ (0 << 16) |
+ (1 << 19) |
+ (2 << 22) |
+ (3 << 25));
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+ OUT_RING(0);
+ OUT_RING(sq_tex_resource_word0);
+ OUT_RING(sq_tex_resource_word1);
+ OUT_RING(gpu_addr >> 8);
+ OUT_RING(gpu_addr >> 8);
+ OUT_RING(sq_tex_resource_word4);
+ OUT_RING(0);
+ OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
+ ADVANCE_RING();
+}
+
+static void
+set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(12);
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+ OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING((x1 << 0) | (y1 << 16));
+ OUT_RING((x2 << 0) | (y2 << 16));
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+ OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
+ OUT_RING((x2 << 0) | (y2 << 16));
+
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+ OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+ OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
+ OUT_RING((x2 << 0) | (y2 << 16));
+ ADVANCE_RING();
+}
+
+static void
+draw_auto(drm_radeon_private_t *dev_priv)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(10);
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
+ OUT_RING(DI_PT_RECTLIST);
+
+ OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+ OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
+ OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
+
+ OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
+ OUT_RING(1);
+
+ OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
+ OUT_RING(3);
+ OUT_RING(DI_SRC_SEL_AUTO_INDEX);
+
+ ADVANCE_RING();
+ COMMIT_RING();
+}
+
+static void
+set_default_state(drm_radeon_private_t *dev_priv)
+{
+ int i;
+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+ u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+ int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+ RING_LOCALS;
+
+ switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
+ case CHIP_R600:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 144;
+ num_vs_threads = 40;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV610:
+ case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ default:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV670:
+ num_ps_gprs = 144;
+ num_vs_gprs = 40;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV770:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 188;
+ num_vs_threads = 60;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 256;
+ num_vs_stack_entries = 256;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV730:
+ case CHIP_RV740:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 188;
+ num_vs_threads = 60;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV710:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 144;
+ num_vs_threads = 48;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ }
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+ sq_config = 0;
+ else
+ sq_config = R600_VC_ENABLE;
+
+ sq_config |= (R600_DX9_CONSTS |
+ R600_ALU_INST_PREFER_VECTOR |
+ R600_PS_PRIO(0) |
+ R600_VS_PRIO(1) |
+ R600_GS_PRIO(2) |
+ R600_ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
+ R600_NUM_VS_GPRS(num_vs_gprs) |
+ R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
+ R600_NUM_ES_GPRS(num_es_gprs));
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
+ R600_NUM_VS_THREADS(num_vs_threads) |
+ R600_NUM_GS_THREADS(num_gs_threads) |
+ R600_NUM_ES_THREADS(num_es_threads));
+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+ BEGIN_RING(r7xx_default_size + 10);
+ for (i = 0; i < r7xx_default_size; i++)
+ OUT_RING(r7xx_default_state[i]);
+ } else {
+ BEGIN_RING(r6xx_default_size + 10);
+ for (i = 0; i < r6xx_default_size; i++)
+ OUT_RING(r6xx_default_state[i]);
+ }
+ OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+ OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+ /* SQ config */
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
+ OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
+ OUT_RING(sq_config);
+ OUT_RING(sq_gpr_resource_mgmt_1);
+ OUT_RING(sq_gpr_resource_mgmt_2);
+ OUT_RING(sq_thread_resource_mgmt);
+ OUT_RING(sq_stack_resource_mgmt_1);
+ OUT_RING(sq_stack_resource_mgmt_2);
+ ADVANCE_RING();
+}
+
+static int r600_nomm_get_vb(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ dev_priv->blit_vb = radeon_freelist_get(dev);
+ if (!dev_priv->blit_vb) {
+ DRM_ERROR("Unable to allocate vertex buffer for blit\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void r600_nomm_put_vb(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->blit_vb->used = 0;
+ radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
+}
+
+static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ return (((char *)dev->agp_buffer_map->handle +
+ dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
+}
+
+int
+r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int ret;
+ DRM_DEBUG("\n");
+
+ ret = r600_nomm_get_vb(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->blit_vb->file_priv = file_priv;
+
+ set_default_state(dev_priv);
+ set_shaders(dev);
+
+ return 0;
+}
+
+void
+r600_done_blit_copy(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+ OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+ OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ r600_nomm_put_vb(dev);
+}
+
+void
+r600_blit_copy(struct drm_device *dev,
+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+ int size_bytes)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int max_bytes;
+ u64 vb_addr;
+ u32 *vb;
+
+ vb = r600_nomm_get_vb_ptr(dev);
+
+ if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+ max_bytes = 8192;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = src_gpu_addr & 255;
+ int dst_x = dst_gpu_addr & 255;
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255;
+ dst_gpu_addr = dst_gpu_addr & ~255;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+ r600_nomm_put_vb(dev);
+ r600_nomm_get_vb(dev);
+ if (!dev_priv->blit_vb)
+ return;
+ set_shaders(dev);
+ vb = r600_nomm_get_vb_ptr(dev);
+ }
+
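+			/* three vertices per rect, each (dst x, dst y, src u, src v) as floats */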
+ vb[0] = int2float(dst_x);
+ vb[1] = 0;
+ vb[2] = int2float(src_x);
+ vb[3] = 0;
+
+ vb[4] = int2float(dst_x);
+ vb[5] = int2float(h);
+ vb[6] = int2float(src_x);
+ vb[7] = int2float(h);
+
+ vb[8] = int2float(dst_x + cur_size);
+ vb[9] = int2float(h);
+ vb[10] = int2float(src_x + cur_size);
+ vb[11] = int2float(h);
+
+ /* src */
+ set_tex_resource(dev_priv, FMT_8,
+ src_x + cur_size, h, src_x + cur_size,
+ src_gpu_addr);
+
+ cp_set_surface_sync(dev_priv,
+ R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst */
+ set_render_target(dev_priv, COLOR_8,
+ dst_x + cur_size, h,
+ dst_gpu_addr);
+
+ /* scissors */
+ set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
+
+ /* Vertex buffer setup */
+ vb_addr = dev_priv->gart_buffers_offset +
+ dev_priv->blit_vb->offset +
+ dev_priv->blit_vb->used;
+ set_vtx_resource(dev_priv, vb_addr);
+
+ /* draw */
+ draw_auto(dev_priv);
+
+ cp_set_surface_sync(dev_priv,
+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ dev_priv->blit_vb->used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ } else {
+ max_bytes = 8192 * 4;
+
+ while (size_bytes) {
+ int cur_size = size_bytes;
+ int src_x = (src_gpu_addr & 255);
+ int dst_x = (dst_gpu_addr & 255);
+ int h = 1;
+ src_gpu_addr = src_gpu_addr & ~255;
+ dst_gpu_addr = dst_gpu_addr & ~255;
+
+ if (!src_x && !dst_x) {
+ h = (cur_size / max_bytes);
+ if (h > 8192)
+ h = 8192;
+ if (h == 0)
+ h = 1;
+ else
+ cur_size = max_bytes;
+ } else {
+ if (cur_size > max_bytes)
+ cur_size = max_bytes;
+ if (cur_size > (max_bytes - dst_x))
+ cur_size = (max_bytes - dst_x);
+ if (cur_size > (max_bytes - src_x))
+ cur_size = (max_bytes - src_x);
+ }
+
+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+ r600_nomm_put_vb(dev);
+ r600_nomm_get_vb(dev);
+ if (!dev_priv->blit_vb)
+ return;
+
+ set_shaders(dev);
+ vb = r600_nomm_get_vb_ptr(dev);
+ }
+
+ vb[0] = int2float(dst_x / 4);
+ vb[1] = 0;
+ vb[2] = int2float(src_x / 4);
+ vb[3] = 0;
+
+ vb[4] = int2float(dst_x / 4);
+ vb[5] = int2float(h);
+ vb[6] = int2float(src_x / 4);
+ vb[7] = int2float(h);
+
+ vb[8] = int2float((dst_x + cur_size) / 4);
+ vb[9] = int2float(h);
+ vb[10] = int2float((src_x + cur_size) / 4);
+ vb[11] = int2float(h);
+
+ /* src */
+ set_tex_resource(dev_priv, FMT_8_8_8_8,
+ (src_x + cur_size) / 4,
+ h, (src_x + cur_size) / 4,
+ src_gpu_addr);
+
+ cp_set_surface_sync(dev_priv,
+ R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+ /* dst */
+ set_render_target(dev_priv, COLOR_8_8_8_8,
+ (dst_x + cur_size) / 4, h,
+ dst_gpu_addr);
+
+ /* scissors */
+			set_scissors(dev_priv, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);
+
+ /* Vertex buffer setup */
+ vb_addr = dev_priv->gart_buffers_offset +
+ dev_priv->blit_vb->offset +
+ dev_priv->blit_vb->used;
+ set_vtx_resource(dev_priv, vb_addr);
+
+ /* draw */
+ draw_auto(dev_priv);
+
+ cp_set_surface_sync(dev_priv,
+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+ cur_size * h, dst_gpu_addr);
+
+ vb += 12;
+ dev_priv->blit_vb->used += 12 * 4;
+
+ src_gpu_addr += cur_size * h;
+ dst_gpu_addr += cur_size * h;
+ size_bytes -= cur_size * h;
+ }
+ }
+}
+
+void
+r600_blit_swap(struct drm_device *dev,
+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+ int sx, int sy, int dx, int dy,
+ int w, int h, int src_pitch, int dst_pitch, int cpp)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int cb_format, tex_format;
+ int sx2, sy2, dx2, dy2;
+ u64 vb_addr;
+ u32 *vb;
+
+ if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+ r600_nomm_put_vb(dev);
+ r600_nomm_get_vb(dev);
+ if (!dev_priv->blit_vb)
+ return;
+
+ set_shaders(dev);
+ }
+ vb = r600_nomm_get_vb_ptr(dev);
+
+ sx2 = sx + w;
+ sy2 = sy + h;
+ dx2 = dx + w;
+ dy2 = dy + h;
+
+ vb[0] = int2float(dx);
+ vb[1] = int2float(dy);
+ vb[2] = int2float(sx);
+ vb[3] = int2float(sy);
+
+ vb[4] = int2float(dx);
+ vb[5] = int2float(dy2);
+ vb[6] = int2float(sx);
+ vb[7] = int2float(sy2);
+
+ vb[8] = int2float(dx2);
+ vb[9] = int2float(dy2);
+ vb[10] = int2float(sx2);
+ vb[11] = int2float(sy2);
+
+	switch (cpp) {
+ case 4:
+ cb_format = COLOR_8_8_8_8;
+ tex_format = FMT_8_8_8_8;
+ break;
+ case 2:
+ cb_format = COLOR_5_6_5;
+ tex_format = FMT_5_6_5;
+ break;
+ default:
+ cb_format = COLOR_8;
+ tex_format = FMT_8;
+ break;
+ }
+
+ /* src */
+ set_tex_resource(dev_priv, tex_format,
+ src_pitch / cpp,
+ sy2, src_pitch / cpp,
+ src_gpu_addr);
+
+ cp_set_surface_sync(dev_priv,
+ R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
+
+ /* dst */
+ set_render_target(dev_priv, cb_format,
+ dst_pitch / cpp, dy2,
+ dst_gpu_addr);
+
+ /* scissors */
+ set_scissors(dev_priv, dx, dy, dx2, dy2);
+
+ /* Vertex buffer setup */
+ vb_addr = dev_priv->gart_buffers_offset +
+ dev_priv->blit_vb->offset +
+ dev_priv->blit_vb->used;
+ set_vtx_resource(dev_priv, vb_addr);
+
+ /* draw */
+ draw_auto(dev_priv);
+
+ cp_set_surface_sync(dev_priv,
+ R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+ dst_pitch * dy2, dst_gpu_addr);
+
+ dev_priv->blit_vb->used += 12 * 4;
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
new file mode 100644
index 0000000..9fb5780
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "r600d.h"
+#include "r600_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts an unsigned integer into its 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
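+ *
+ * For example, int2float(5): msb = 2, so the exponent field is
+ * (127 + 2) << 23 = 0x40800000 and the fraction is
+ * ror32(5, (2 - 23) & 0x1f) & I2F_MASK = 0x00200000; their sum,
+ * 0x40a00000, is the IEEE-754 encoding of 5.0f.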
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+	if (!x)
+		return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
+/* emits 21 on r600 and rv770+, 23 on rv610-rv670 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 cb_color_info;
+ int pitch, slice;
+
+ h = ALIGN(h, 8);
+ if (h < 8)
+ h = 8;
+
+ cb_color_info = CB_FORMAT(format) |
+ CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+ CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ pitch = (w / 8) - 1;
+ slice = ((w * h) / 64) - 1;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+
+ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+ radeon_ring_write(ring, 2 << 0);
+ }
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (pitch << 0) | (slice << 10));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, cb_color_info);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 cp_coher_size;
+
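+	/* CP_COHER_SIZE is in units of 256 bytes, hence the round-up */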
+ if (size == 0xffffffff)
+ cp_coher_size = 0xffffffff;
+ else
+ cp_coher_size = ((size + 255) >> 8);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
+
+/* emits 21dw + 1 surface sync = 26dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u64 gpu_addr;
+ u32 sq_pgm_resources;
+
+ /* setup shader regs */
+ sq_pgm_resources = (1 << 0);
+
+ /* VS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+
+ /* PS */
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 2);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
+
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 9 + 1 sync (5) = 14 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 sq_vtx_constant_word2;
+
+ sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+ SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+ sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0x460);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1);
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, 1 << 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+ if ((rdev->family == CHIP_RV610) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RS780) ||
+ (rdev->family == CHIP_RS880) ||
+ (rdev->family == CHIP_RV710))
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+ else
+ cp_set_surface_sync(rdev,
+ PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 9 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr, u32 size)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+
+ if (h < 1)
+ h = 1;
+
+ sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
+ S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
+ S_038000_TEX_WIDTH(w - 1);
+
+ sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
+ sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
+
+ sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
+ S_038010_DST_SEL_X(SQ_SEL_X) |
+ S_038010_DST_SEL_Y(SQ_SEL_Y) |
+ S_038010_DST_SEL_Z(SQ_SEL_Z) |
+ S_038010_DST_SEL_W(SQ_SEL_W);
+
+ cp_set_surface_sync(rdev,
+ PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 2) |
+#endif
+ DI_INDEX_SIZE_16_BIT);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
+
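+	/* a RECTLIST rect is drawn from three auto-generated indices */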
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+}
+
+/* emits 14 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+ u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+ int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+ int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+ int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+ u64 gpu_addr;
+ int dwords;
+
+ switch (rdev->family) {
+ case CHIP_R600:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 144;
+ num_vs_threads = 40;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV610:
+ case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ default:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV670:
+ num_ps_gprs = 144;
+ num_vs_gprs = 40;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 136;
+ num_vs_threads = 48;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 40;
+ num_vs_stack_entries = 40;
+ num_gs_stack_entries = 32;
+ num_es_stack_entries = 16;
+ break;
+ case CHIP_RV770:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 188;
+ num_vs_threads = 60;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 256;
+ num_vs_stack_entries = 256;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV730:
+ case CHIP_RV740:
+ num_ps_gprs = 84;
+ num_vs_gprs = 36;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 188;
+ num_vs_threads = 60;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ case CHIP_RV710:
+ num_ps_gprs = 192;
+ num_vs_gprs = 56;
+ num_temp_gprs = 4;
+ num_gs_gprs = 0;
+ num_es_gprs = 0;
+ num_ps_threads = 144;
+ num_vs_threads = 48;
+ num_gs_threads = 0;
+ num_es_threads = 0;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 0;
+ num_es_stack_entries = 0;
+ break;
+ }
+
+ if ((rdev->family == CHIP_RV610) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RS780) ||
+ (rdev->family == CHIP_RS880) ||
+ (rdev->family == CHIP_RV710))
+ sq_config = 0;
+ else
+ sq_config = VC_ENABLE;
+
+ sq_config |= (DX9_CONSTS |
+ ALU_INST_PREFER_VECTOR |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+ NUM_VS_GPRS(num_vs_gprs) |
+ NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+ sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+ NUM_ES_GPRS(num_es_gprs));
+ sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+ NUM_VS_THREADS(num_vs_threads) |
+ NUM_GS_THREADS(num_gs_threads) |
+ NUM_ES_THREADS(num_es_threads));
+ sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+ NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+ sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+ NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+ /* emit an IB pointing at default state */
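+	/* the IB length must be a multiple of 16 dwords; r600_blit_init pads the saved state with PACKET2 NOPs to match */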
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
+
+ /* SQ config */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+}
+
+int r600_blit_init(struct radeon_device *rdev)
+{
+ u32 obj_size;
+ int i, r, dwords;
+ void *ptr;
+ u32 packet2s[16];
+ int num_packet2s = 0;
+
+ rdev->r600_blit.primitives.set_render_target = set_render_target;
+ rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+ rdev->r600_blit.primitives.set_shaders = set_shaders;
+ rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+ rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+ rdev->r600_blit.primitives.set_scissors = set_scissors;
+ rdev->r600_blit.primitives.draw_auto = draw_auto;
+ rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
+ rdev->r600_blit.ring_size_common += 5; /* done copy */
+ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+ rdev->r600_blit.ring_size_per_loop = 76;
+ /* set_render_target emits 2 extra dwords on rv6xx */
+ if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+ rdev->r600_blit.ring_size_per_loop += 2;
+
+ rdev->r600_blit.max_dim = 8192;
+
+ rdev->r600_blit.state_offset = 0;
+
+ if (rdev->family >= CHIP_RV770)
+ rdev->r600_blit.state_len = r7xx_default_size;
+ else
+ rdev->r600_blit.state_len = r6xx_default_size;
+
+ dwords = rdev->r600_blit.state_len;
+ while (dwords & 0xf) {
+ packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+ dwords++;
+ }
+
+ obj_size = dwords * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.vs_offset = obj_size;
+ obj_size += r6xx_vs_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ rdev->r600_blit.ps_offset = obj_size;
+ obj_size += r6xx_ps_size * 4;
+ obj_size = ALIGN(obj_size, 256);
+
+ /* pin copy shader into vram if not already initialized */
+ if (rdev->r600_blit.shader_obj == NULL) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("r600 failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+ }
+
+ DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
+ obj_size,
+ rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+ if (r) {
+ DRM_ERROR("failed to map blit object %d\n", r);
+ return r;
+ }
+ if (rdev->family >= CHIP_RV770)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ r7xx_default_state, rdev->r600_blit.state_len * 4);
+ else
+ memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ r6xx_default_state, rdev->r600_blit.state_len * 4);
+ if (num_packet2s)
+ memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ packet2s, num_packet2s * 4);
+ for (i = 0; i < r6xx_vs_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+ for (i = 0; i < r6xx_ps_size; i++)
+ *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ return 0;
+}
+
+void r600_blit_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
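+/*
+ * Pick a blit rectangle covering as many of the remaining GPU pages as
+ * possible without exceeding max_dim on either axis. Rectangles are
+ * built from RECT_UNIT_W x RECT_UNIT_H tiles (radeon_blit_common.h),
+ * sized so that one tile of 32-bit pixels is exactly one GPU page.
+ */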
+static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
+ int *width, int *height, int max_dim)
+{
+ unsigned max_pages;
+ unsigned pages = num_gpu_pages;
+ int w, h;
+
+ if (num_gpu_pages == 0) {
+ /* not supposed to be called with no pages, but just in case */
+ h = 0;
+ w = 0;
+ pages = 0;
+ WARN_ON(1);
+ } else {
+ int rect_order = 2;
+ h = RECT_UNIT_H;
+ while (num_gpu_pages / rect_order) {
+ h *= 2;
+ rect_order *= 4;
+ if (h >= max_dim) {
+ h = max_dim;
+ break;
+ }
+ }
+ max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
+ if (pages > max_pages)
+ pages = max_pages;
+ w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
+ w = (w / RECT_UNIT_W) * RECT_UNIT_W;
+ pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
+ BUG_ON(pages == 0);
+ }
+
+ DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
+
+	/* return width and height only if the caller wants them */
+ if (height)
+ *height = h;
+ if (width)
+ *width = w;
+
+ return pages;
+}
+
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+ int ring_size;
+ int num_loops = 0;
+ int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
+
+ /* num loops */
+ while (num_gpu_pages) {
+ num_gpu_pages -=
+ r600_blit_create_rect(num_gpu_pages, NULL, NULL,
+ rdev->r600_blit.max_dim);
+ num_loops++;
+ }
+
+	/* 48 bytes of vertex data per loop: 3 vertices x 4 floats */
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+ (num_loops*48)+256, 256, true);
+ if (r) {
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, sem);
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
+ return r;
+ }
+
+ /* calculate number of loops correctly */
+ ring_size = num_loops * dwords_per_loop;
+ ring_size += rdev->r600_blit.ring_size_common;
+ r = radeon_ring_lock(rdev, ring, ring_size);
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
+ radeon_semaphore_free(rdev, sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+ radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+ RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+ } else {
+ radeon_semaphore_free(rdev, sem, NULL);
+ }
+
+ rdev->r600_blit.primitives.set_default_state(rdev);
+ rdev->r600_blit.primitives.set_shaders(rdev);
+ return 0;
+}
+
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_sa_bo_free(rdev, &vb, *fence);
+ radeon_semaphore_free(rdev, &sem, *fence);
+}
+
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb)
+{
+ u64 vb_gpu_addr;
+ u32 *vb_cpu_addr;
+
+ DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+ src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+ vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+ vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
+
+ while (num_gpu_pages) {
+ int w, h;
+ unsigned size_in_bytes;
+ unsigned pages_per_loop =
+ r600_blit_create_rect(num_gpu_pages, &w, &h,
+ rdev->r600_blit.max_dim);
+
+ size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
+ DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
+
+ vb_cpu_addr[0] = 0;
+ vb_cpu_addr[1] = 0;
+ vb_cpu_addr[2] = 0;
+ vb_cpu_addr[3] = 0;
+
+ vb_cpu_addr[4] = 0;
+ vb_cpu_addr[5] = int2float(h);
+ vb_cpu_addr[6] = 0;
+ vb_cpu_addr[7] = int2float(h);
+
+ vb_cpu_addr[8] = int2float(w);
+ vb_cpu_addr[9] = int2float(h);
+ vb_cpu_addr[10] = int2float(w);
+ vb_cpu_addr[11] = int2float(h);
+
+ rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
+ w, h, w, src_gpu_addr, size_in_bytes);
+ rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
+ w, h, dst_gpu_addr);
+ rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
+ rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
+ rdev->r600_blit.primitives.draw_auto(rdev);
+ rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
+ PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+ size_in_bytes, dst_gpu_addr);
+
+ vb_cpu_addr += 12;
+ vb_gpu_addr += 4*12;
+ src_gpu_addr += size_in_bytes;
+ dst_gpu_addr += size_in_bytes;
+ num_gpu_pages -= pages_per_loop;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
new file mode 100644
index 0000000..34c8b23
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * R6xx+ cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
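+
+/*
+ * Each entry below is a raw PM4 type-3 packet header followed by its
+ * payload: bits [31:30] are the packet type (3), bits [29:16] hold one
+ * less than the number of payload dwords, and bits [15:8] the opcode.
+ * 0xc0016900, for instance, is SET_CONTEXT_REG (opcode 0x69) with one
+ * register offset/value pair following.
+ */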
+
+const u32 r6xx_default_state[] =
+{
+ 0xc0002400, /* START_3D_CMDBUF */
+ 0x00000000,
+
+ 0xc0012800, /* CONTEXT_CONTROL */
+ 0x80000000,
+ 0x80000000,
+
+ 0xc0016800,
+ 0x00000010,
+ 0x00008000, /* WAIT_UNTIL */
+
+ 0xc0016800,
+ 0x00000542,
+ 0x07000003, /* TA_CNTL_AUX */
+
+ 0xc0016800,
+ 0x000005c5,
+ 0x00000000, /* VC_ENHANCE */
+
+ 0xc0016800,
+ 0x00000363,
+ 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+ 0xc0016800,
+ 0x0000060c,
+ 0x82000000, /* DB_DEBUG */
+
+ 0xc0016800,
+ 0x0000060e,
+ 0x01020204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
+ 0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
+ 0xc0096900,
+ 0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0016900,
+ 0x00000004,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
+ 0x00000343,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000040, /* DB_RENDER_OVERRIDE */
+
+ 0xc0016900,
+ 0x00000351,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc00f6900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* CB_FOG_RED */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
+ 0xc0046900,
+ 0x0000030c,
+ 0x01000000, /* CB_CLRCMP_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000048,
+ 0x3f800000, /* CB_CLEAR_RED */
+ 0x00000000,
+ 0x3f800000,
+ 0x3f800000,
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00004010, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
+ 0x0000037e,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
+ 0x000001b6,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+ 0xc0016900,
+ 0x00000225,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0016900,
+ 0x00000237,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
+ 0x000002ac,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
+ 0x000002c8,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
+ 0x00000202,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0016900,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
+ 0xc0016900,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000000, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
+ 0x00000000,
+ 0x00000012,
+ 0x00000000,
+ 0x00000000,
+};
+
+const u32 r7xx_default_state[] =
+{
+ 0xc0012800, /* CONTEXT_CONTROL */
+ 0x80000000,
+ 0x80000000,
+
+ 0xc0016800,
+ 0x00000010,
+ 0x00008000, /* WAIT_UNTIL */
+
+ 0xc0016800,
+ 0x00000542,
+ 0x07000002, /* TA_CNTL_AUX */
+
+ 0xc0016800,
+ 0x000005c5,
+ 0x00000000, /* VC_ENHANCE */
+
+ 0xc0016800,
+ 0x00000363,
+ 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+ 0xc0016800,
+ 0x0000060c,
+ 0x00000000, /* DB_DEBUG */
+
+ 0xc0016800,
+ 0x0000060e,
+ 0x00420204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
+ 0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
+ 0xc0096900,
+ 0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0016900,
+ 0x00000004,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
+ 0x00000343,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_RENDER_OVERRIDE */
+
+ 0xc0016900,
+ 0x00000351,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0036900,
+ 0x0000010c,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
+ 0xc0046900,
+ 0x0000030c, /* CB_CLRCMP_CNTL */
+ 0x01000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+ 0x00000000,
+ 0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00514000, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000,
+ 0x3f800000,
+ 0x3f800000,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
+ 0x0000037e,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
+ 0x000001b6,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+ 0xc0016900,
+ 0x00000225,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0016900,
+ 0x00000237,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
+ 0x000002ac,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
+ 0x000002c8,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
+ 0x00000202,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0016900,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
+ 0xc0016900,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000001, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
+ 0x00000000,
+ 0x00000012,
+ 0x00000000,
+ 0x00000000,
+};
+
+/* same for r6xx/r7xx */
+const u32 r6xx_vs[] =
+{
+ 0x00000004,
+ 0x81000000,
+ 0x0000203c,
+ 0x94000b08,
+ 0x00004000,
+ 0x14200b1a,
+ 0x00000000,
+ 0x00000000,
+ 0x3c000000,
+ 0x68cd1000,
+#ifdef __BIG_ENDIAN
+ 0x000a0000,
+#else
+ 0x00080000,
+#endif
+ 0x00000000,
+};
+
+const u32 r6xx_ps[] =
+{
+ 0x00000002,
+ 0x80800000,
+ 0x00000000,
+ 0x94200688,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
+const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
+const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
+const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
new file mode 100644
index 0000000..2f3ce7a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef R600_BLIT_SHADERS_H
+#define R600_BLIT_SHADERS_H
+
+extern const u32 r6xx_ps[];
+extern const u32 r6xx_vs[];
+extern const u32 r7xx_default_state[];
+extern const u32 r6xx_default_state[];
+
+extern const u32 r6xx_ps_size, r6xx_vs_size;
+extern const u32 r6xx_default_size, r7xx_default_size;
+
+__pure uint32_t int2float(uint32_t x);
+#endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
new file mode 100644
index 0000000..1c51c08
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -0,0 +1,2660 @@
+/*
+ * Copyright 2008-2009 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+ unsigned family, u32 *ib, int *l);
+void r600_cs_legacy_init(void);
+
+#define ATI_PCIGART_PAGE_SIZE	4096	/**< PCI GART page size */
+#define ATI_PCIGART_PAGE_MASK	(~(ATI_PCIGART_PAGE_SIZE-1))
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
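+
+/*
+ * A GART page table entry, as assembled in r600_page_table_init()
+ * below, is the 4KB-aligned bus address of the backing page OR'ed
+ * with the flag bits above:
+ *
+ *   pte = (bus_addr & ATI_PCIGART_PAGE_MASK) | R600_PTE_VALID |
+ *         R600_PTE_SYSTEM | R600_PTE_SNOOPED |
+ *         R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ */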
+
+/* MAX values used for gfx init */
+#define R6XX_MAX_SH_GPRS 256
+#define R6XX_MAX_TEMP_GPRS 16
+#define R6XX_MAX_SH_THREADS 256
+#define R6XX_MAX_SH_STACK_ENTRIES 4096
+#define R6XX_MAX_BACKENDS 8
+#define R6XX_MAX_BACKENDS_MASK 0xff
+#define R6XX_MAX_SIMDS 8
+#define R6XX_MAX_SIMDS_MASK 0xff
+#define R6XX_MAX_PIPES 8
+#define R6XX_MAX_PIPES_MASK 0xff
+
+#define R7XX_MAX_SH_GPRS 256
+#define R7XX_MAX_TEMP_GPRS 16
+#define R7XX_MAX_SH_THREADS 256
+#define R7XX_MAX_SH_STACK_ENTRIES 4096
+#define R7XX_MAX_BACKENDS 8
+#define R7XX_MAX_BACKENDS_MASK 0xff
+#define R7XX_MAX_SIMDS 16
+#define R7XX_MAX_SIMDS_MASK 0xffff
+#define R7XX_MAX_PIPES 8
+#define R7XX_MAX_PIPES_MASK 0xff
+
+static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
+{
+ int i;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
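+	/* the CMDFIFO_AVAIL field of GRBM_STATUS differs between r6xx and
+	 * r7xx, hence the per-family masks below */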
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int slots;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+ slots = (RADEON_READ(R600_GRBM_STATUS)
+ & R700_CMDFIFO_AVAIL_MASK);
+ else
+ slots = (RADEON_READ(R600_GRBM_STATUS)
+ & R600_CMDFIFO_AVAIL_MASK);
+ if (slots >= entries)
+ return 0;
+ DRM_UDELAY(1);
+ }
+ DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
+ RADEON_READ(R600_GRBM_STATUS),
+ RADEON_READ(R600_GRBM_STATUS2));
+
+ return -EBUSY;
+}
+
+static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv)
+{
+ int i, ret;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+ ret = r600_do_wait_for_fifo(dev_priv, 8);
+ else
+ ret = r600_do_wait_for_fifo(dev_priv, 16);
+ if (ret)
+ return ret;
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE))
+ return 0;
+ DRM_UDELAY(1);
+ }
+ DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
+ RADEON_READ(R600_GRBM_STATUS),
+ RADEON_READ(R600_GRBM_STATUS2));
+
+ return -EBUSY;
+}
+
+void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+ struct drm_sg_mem *entry = dev->sg;
+ int max_pages;
+ int pages;
+ int i;
+
+ if (!entry)
+ return;
+
+ if (gart_info->bus_addr) {
+ max_pages = (gart_info->table_size / sizeof(u64));
+ pages = (entry->pages <= max_pages)
+ ? entry->pages : max_pages;
+
+ for (i = 0; i < pages; i++) {
+ if (!entry->busaddr[i])
+ break;
+ pci_unmap_page(dev->pdev, entry->busaddr[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+ gart_info->bus_addr = 0;
+ }
+}
+
+/* R600 uses a real page table for its GART; set it up here */
+int r600_page_table_init(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+ struct drm_local_map *map = &gart_info->mapping;
+ struct drm_sg_mem *entry = dev->sg;
+ int ret = 0;
+ int i, j;
+ int pages;
+ u64 page_base;
+ dma_addr_t entry_addr;
+ int max_ati_pages, max_real_pages, gart_idx;
+
+	/* okay, the page table is available - let's populate it */
+ max_ati_pages = (gart_info->table_size / sizeof(u64));
+ max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+
+ pages = (entry->pages <= max_real_pages) ?
+ entry->pages : max_real_pages;
+
+ memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));
+
+ gart_idx = 0;
+ for (i = 0; i < pages; i++) {
+ entry->busaddr[i] = pci_map_page(dev->pdev,
+ entry->pagelist[i], 0,
+ PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+ DRM_ERROR("unable to map PCIGART pages!\n");
+ r600_page_table_cleanup(dev, gart_info);
+ goto done;
+ }
+ entry_addr = entry->busaddr[i];
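+		/* each CPU page spans PAGE_SIZE / ATI_PCIGART_PAGE_SIZE GART
+		 * pages, so emit one PTE per 4KB chunk of the mapping */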
+ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+ page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
+ page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+
+ DRM_WRITE64(map, gart_idx * sizeof(u64), page_base);
+
+ gart_idx++;
+
+ if ((i % 128) == 0)
+ DRM_DEBUG("page entry %d: 0x%016llx\n",
+ i, (unsigned long long)page_base);
+ entry_addr += ATI_PCIGART_PAGE_SIZE;
+ }
+ }
+ ret = 1;
+done:
+ return ret;
+}
+
+static void r600_vm_flush_gart_range(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 resp, countdown = 1000;
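+
+	/* program the invalidation range, request the flush, then poll the
+	 * response field (bits 7:4) until it goes non-zero or we time out */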
+ RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+ RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+ RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2);
+
+ do {
+ resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE);
+ countdown--;
+ DRM_UDELAY(1);
+ } while (((resp & 0xf0) == 0) && countdown);
+}
+
+static void r600_vm_init(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table we constructed above */
+ u32 vm_c0, i;
+ u32 mc_rd_a;
+ u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIE system aperture */
+ RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+ RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+ RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+	/* set up the MC L1 TLB controls; one value serves all clients below */
+ mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS |
+ R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) |
+ R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY;
+
+ RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a);
+ RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a);
+
+ RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a);
+ RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a);
+
+ RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a);
+ RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a);
+
+ RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a);
+ RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a);
+
+ RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING);
+ RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/);
+
+ RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a);
+ RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a);
+
+ RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE);
+ RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a);
+
+ vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+ vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7);
+ RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+ RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+ vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) |
+ R600_VM_L2_CNTL3_BANK_SELECT_1(1) |
+ R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2));
+ RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+ vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+ RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+ vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+ /* disable all other contexts */
+ for (i = 1; i < 8; i++)
+ RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+ RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+ RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+ RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+ r600_vm_flush_gart_range(dev);
+}
+
+static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ size_t pfp_req_size, me_req_size;
+ char fw_name[30];
+ int err;
+
+ pdev = platform_device_register_simple("r600_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "r600_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_R600: chip_name = "R600"; break;
+ case CHIP_RV610: chip_name = "RV610"; break;
+ case CHIP_RV630: chip_name = "RV630"; break;
+ case CHIP_RV620: chip_name = "RV620"; break;
+ case CHIP_RV635: chip_name = "RV635"; break;
+ case CHIP_RV670: chip_name = "RV670"; break;
+ case CHIP_RS780:
+ case CHIP_RS880: chip_name = "RS780"; break;
+ case CHIP_RV770: chip_name = "RV770"; break;
+ case CHIP_RV730:
+ case CHIP_RV740: chip_name = "RV730"; break;
+ case CHIP_RV710: chip_name = "RV710"; break;
+ default: BUG();
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+ pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+ me_req_size = R700_PM4_UCODE_SIZE * 4;
+ } else {
+ pfp_req_size = PFP_UCODE_SIZE * 4;
+ me_req_size = PM4_UCODE_SIZE * 12;
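+		/* the r6xx ME image is PM4_UCODE_SIZE * 3 dwords long, hence
+		 * 12 bytes per entry here and the matching copy loop in
+		 * r600_cp_load_microcode() below */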
+ }
+
+ DRM_INFO("Loading %s CP Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (dev_priv->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+ dev_priv->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (dev_priv->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+ dev_priv->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "r600_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(dev_priv->pfp_fw);
+ dev_priv->pfp_fw = NULL;
+ release_firmware(dev_priv->me_fw);
+ dev_priv->me_fw = NULL;
+ }
+ return err;
+}
+
+static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+ return;
+
+ r600_do_cp_stop(dev_priv);
+
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ R600_RB_NO_UPDATE |
+ R600_RB_BLKSZ(15) |
+ R600_RB_BUFSZ(3));
+
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ RADEON_READ(R600_GRBM_SOFT_RESET);
+ mdelay(15);
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
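+	/* the firmware images are stored big-endian; byte-swap each dword
+	 * on the way into the ucode RAMs */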
+ fw_data = (const __be32 *)dev_priv->me_fw->data;
+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+ RADEON_WRITE(R600_CP_ME_RAM_DATA,
+ be32_to_cpup(fw_data++));
+
+ fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < PFP_UCODE_SIZE; i++)
+ RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
+ be32_to_cpup(fw_data++));
+
+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+ RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+}
+
+static void r700_vm_init(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table we constructed above */
+ u32 vm_c0, i;
+ u32 mc_vm_md_l1;
+ u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIE system aperture */
+ RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+ RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+ RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+ mc_vm_md_l1 = R700_ENABLE_L1_TLB |
+ R700_ENABLE_L1_FRAGMENT_PROCESSING |
+ R700_SYSTEM_ACCESS_MODE_IN_SYS |
+ R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ R700_EFFECTIVE_L1_TLB_SIZE(5) |
+ R700_EFFECTIVE_L1_QUEUE_SIZE(5);
+
+ RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1);
+ RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1);
+
+ vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+ vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7);
+ RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+ RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+ vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2);
+ RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+ vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+ RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+ vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+ /* disable all other contexts */
+ for (i = 1; i < 8; i++)
+ RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+ RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+ RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+ RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+ r600_vm_flush_gart_range(dev);
+}
+
+static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+ return;
+
+ r600_do_cp_stop(dev_priv);
+
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ R600_RB_NO_UPDATE |
+ R600_RB_BLKSZ(15) |
+ R600_RB_BUFSZ(3));
+
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ RADEON_READ(R600_GRBM_SOFT_RESET);
+ mdelay(15);
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+ fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+ RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)dev_priv->me_fw->data;
+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+ RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+
+ RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+ RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+ RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+}
+
+static void r600_test_writeback(drm_radeon_private_t *dev_priv)
+{
+ u32 tmp;
+
+	/* Start by assuming that writeback doesn't work */
+ dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere; test it here and only
+	 * enable it if it appears to work
+ */
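+	/* clear the host-visible scratch slot, write a magic value to the
+	 * scratch register, and watch for the GPU to copy it back */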
+ radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+
+ RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef);
+
+ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+ u32 val;
+
+ val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1));
+ if (val == 0xdeadbeef)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (tmp < dev_priv->usec_timeout) {
+ dev_priv->writeback_works = 1;
+ DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+ } else {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback test failed\n");
+ }
+ if (radeon_no_wb == 1) {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback forced off\n");
+ }
+
+ if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus-master transfers */
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ RADEON_READ(R600_CP_RB_CNTL) |
+ R600_RB_NO_UPDATE);
+ RADEON_WRITE(R600_SCRATCH_UMSK, 0);
+ }
+}
+
+int r600_do_engine_reset(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 cp_ptr, cp_me_cntl, cp_rb_cntl;
+
+ DRM_INFO("Resetting GPU\n");
+
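+	/* save the ring pointers and ME control, pulse a broad GRBM soft
+	 * reset, then restore them so the ring can resume where it left
+	 * off */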
+ cp_ptr = RADEON_READ(R600_CP_RB_WPTR);
+ cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL);
+ RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT);
+
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff);
+ RADEON_READ(R600_GRBM_SOFT_RESET);
+ DRM_UDELAY(50);
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+ RADEON_READ(R600_GRBM_SOFT_RESET);
+
+ RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+ cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
+ RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ R600_BUF_SWAP_32BIT |
+#endif
+ R600_RB_RPTR_WR_ENA);
+
+ RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
+ RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
+ RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl);
+ RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl);
+
+ /* Reset the CP ring */
+ r600_do_cp_reset(dev_priv);
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ /* Reset any pending vertex, indirect buffers */
+ radeon_freelist_reset(dev);
+
+ return 0;
+}
+
+static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask;
+ u32 enabled_backends_count;
+ u32 cur_pipe;
+ u32 swizzle_pipe[R6XX_MAX_PIPES];
+ u32 cur_backend;
+ u32 i;
+
+ if (num_tile_pipes > R6XX_MAX_PIPES)
+ num_tile_pipes = R6XX_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > R6XX_MAX_BACKENDS)
+ num_backends = R6XX_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ enabled_backends_mask = 0;
+ enabled_backends_count = 0;
+ for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
+ switch (num_tile_pipes) {
+ case 1:
+ swizzle_pipe[0] = 0;
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 3:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ break;
+ case 4:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ break;
+ case 5:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ break;
+ case 6:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ break;
+ case 7:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ break;
+ case 8:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ break;
+ }
+
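+	/* backend_map packs one 2-bit render-backend index per tile pipe
+	 * at bit position swizzle_pipe[pipe] * 2, assigning the enabled
+	 * backends round-robin */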
+ cur_backend = 0;
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+
+ backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+ cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
+
+static int r600_count_pipe_bits(uint32_t val)
+{
+ return hweight32(val);
+}
+
+static void r600_gfx_init(struct drm_device *dev,
+ drm_radeon_private_t *dev_priv)
+{
+ int i, j, num_qd_pipes;
+ u32 sx_debug_1;
+ u32 tc_cntl;
+ u32 arb_pop;
+ u32 num_gs_verts_per_thread;
+ u32 vgt_gs_per_es;
+ u32 gs_prim_buffer_depth = 0;
+ u32 sq_ms_fifo_sizes;
+ u32 sq_config;
+ u32 sq_gpr_resource_mgmt_1 = 0;
+ u32 sq_gpr_resource_mgmt_2 = 0;
+ u32 sq_thread_resource_mgmt = 0;
+ u32 sq_stack_resource_mgmt_1 = 0;
+ u32 sq_stack_resource_mgmt_2 = 0;
+ u32 hdp_host_path_cntl;
+ u32 backend_map;
+ u32 gb_tiling_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
+ u32 ramcfg;
+
+ /* setup chip specs */
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_R600:
+ dev_priv->r600_max_pipes = 4;
+ dev_priv->r600_max_tile_pipes = 8;
+ dev_priv->r600_max_simds = 4;
+ dev_priv->r600_max_backends = 4;
+ dev_priv->r600_max_gprs = 256;
+ dev_priv->r600_max_threads = 192;
+ dev_priv->r600_max_stack_entries = 256;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 128;
+ dev_priv->r600_sq_num_cf_insts = 2;
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ dev_priv->r600_max_pipes = 2;
+ dev_priv->r600_max_tile_pipes = 2;
+ dev_priv->r600_max_simds = 3;
+ dev_priv->r600_max_backends = 1;
+ dev_priv->r600_max_gprs = 128;
+ dev_priv->r600_max_threads = 192;
+ dev_priv->r600_max_stack_entries = 128;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 4;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 128;
+ dev_priv->r600_sq_num_cf_insts = 2;
+ break;
+ case CHIP_RV610:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV620:
+ dev_priv->r600_max_pipes = 1;
+ dev_priv->r600_max_tile_pipes = 1;
+ dev_priv->r600_max_simds = 2;
+ dev_priv->r600_max_backends = 1;
+ dev_priv->r600_max_gprs = 128;
+ dev_priv->r600_max_threads = 192;
+ dev_priv->r600_max_stack_entries = 128;
+ dev_priv->r600_max_hw_contexts = 4;
+ dev_priv->r600_max_gs_threads = 4;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 128;
+ dev_priv->r600_sq_num_cf_insts = 1;
+ break;
+ case CHIP_RV670:
+ dev_priv->r600_max_pipes = 4;
+ dev_priv->r600_max_tile_pipes = 4;
+ dev_priv->r600_max_simds = 4;
+ dev_priv->r600_max_backends = 4;
+ dev_priv->r600_max_gprs = 192;
+ dev_priv->r600_max_threads = 192;
+ dev_priv->r600_max_stack_entries = 256;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 128;
+ dev_priv->r600_sq_num_cf_insts = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Initialize HDP */
+ j = 0;
+ for (i = 0; i < 32; i++) {
+ RADEON_WRITE((0x2c14 + j), 0x00000000);
+ RADEON_WRITE((0x2c18 + j), 0x00000000);
+ RADEON_WRITE((0x2c1c + j), 0x00000000);
+ RADEON_WRITE((0x2c20 + j), 0x00000000);
+ RADEON_WRITE((0x2c24 + j), 0x00000000);
+ j += 0x18;
+ }
+
+ RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+ /* setup tiling, simd, pipe config */
+ ramcfg = RADEON_READ(R600_RAMCFG);
+
+ switch (dev_priv->r600_max_tile_pipes) {
+ case 1:
+ gb_tiling_config |= R600_PIPE_TILING(0);
+ break;
+ case 2:
+ gb_tiling_config |= R600_PIPE_TILING(1);
+ break;
+ case 4:
+ gb_tiling_config |= R600_PIPE_TILING(2);
+ break;
+ case 8:
+ gb_tiling_config |= R600_PIPE_TILING(3);
+ break;
+ default:
+ break;
+ }
+
+ gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK);
+
+ gb_tiling_config |= R600_GROUP_SIZE(0);
+
+ if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) {
+ gb_tiling_config |= R600_ROW_TILING(3);
+ gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+ } else {
+ gb_tiling_config |=
+ R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+ gb_tiling_config |=
+ R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+ }
+
+ gb_tiling_config |= R600_BANK_SWAPS(1);
+
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
+
+ backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+ RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
+ RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
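+	/* cache the tiling parameters just programmed: bits [7:6] hold the
+	 * group size, bits [3:1] log2(pipes) and bits [5:4] the bank count,
+	 * per the decodes below */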
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
+
+ RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ num_qd_pipes =
+ R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+ RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+ RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+ /* set HW defaults for 3D engine */
+ RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+ R600_ROQ_IB2_START(0x2b)));
+
+ RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) |
+ R600_ROQ_END(0x40)));
+
+ RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
+ R600_SYNC_GRADIENT |
+ R600_SYNC_WALKER |
+ R600_SYNC_ALIGNER));
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670)
+ RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021);
+
+ sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1);
+ sx_debug_1 |= R600_SMX_EVENT_RELEASE;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600)
+ sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS;
+ RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1);
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+ RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+ else
+ RADEON_WRITE(R600_DB_DEBUG, 0);
+
+ RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) |
+ R600_DEPTH_FLUSH(16) |
+ R600_DEPTH_PENDING_FREE(4) |
+ R600_DEPTH_CACHELINE_FREE(16)));
+ RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+ RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0);
+
+ RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+ RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0));
+
+ sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+ sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
+ R600_FETCH_FIFO_HIWATER(0xa) |
+ R600_DONE_FIFO_HIWATER(0xe0) |
+ R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) {
+ sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff);
+ sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4);
+ }
+ RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
+ */
+ sq_config = RADEON_READ(R600_SQ_CONFIG);
+ sq_config &= ~(R600_PS_PRIO(3) |
+ R600_VS_PRIO(3) |
+ R600_GS_PRIO(3) |
+ R600_ES_PRIO(3));
+ sq_config |= (R600_DX9_CONSTS |
+ R600_VC_ENABLE |
+ R600_PS_PRIO(0) |
+ R600_VS_PRIO(1) |
+ R600_GS_PRIO(2) |
+ R600_ES_PRIO(3));
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) {
+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) |
+ R600_NUM_VS_GPRS(124) |
+ R600_NUM_CLAUSE_TEMP_GPRS(4));
+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) |
+ R600_NUM_ES_GPRS(0));
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) |
+ R600_NUM_VS_THREADS(48) |
+ R600_NUM_GS_THREADS(4) |
+ R600_NUM_ES_THREADS(4));
+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) |
+ R600_NUM_VS_STACK_ENTRIES(128));
+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) |
+ R600_NUM_ES_STACK_ENTRIES(0));
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+ /* no vertex cache */
+ sq_config &= ~R600_VC_ENABLE;
+
+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+ R600_NUM_VS_GPRS(44) |
+ R600_NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+ R600_NUM_ES_GPRS(17));
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+ R600_NUM_VS_THREADS(78) |
+ R600_NUM_GS_THREADS(4) |
+ R600_NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+ R600_NUM_VS_STACK_ENTRIES(40));
+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+ R600_NUM_ES_STACK_ENTRIES(16));
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) {
+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+ R600_NUM_VS_GPRS(44) |
+ R600_NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) |
+ R600_NUM_ES_GPRS(18));
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+ R600_NUM_VS_THREADS(78) |
+ R600_NUM_GS_THREADS(4) |
+ R600_NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+ R600_NUM_VS_STACK_ENTRIES(40));
+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+ R600_NUM_ES_STACK_ENTRIES(16));
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) {
+ sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+ R600_NUM_VS_GPRS(44) |
+ R600_NUM_CLAUSE_TEMP_GPRS(2));
+ sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+ R600_NUM_ES_GPRS(17));
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+ R600_NUM_VS_THREADS(78) |
+ R600_NUM_GS_THREADS(4) |
+ R600_NUM_ES_THREADS(31));
+ sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) |
+ R600_NUM_VS_STACK_ENTRIES(64));
+ sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) |
+ R600_NUM_ES_STACK_ENTRIES(64));
+ }
+
+ RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+ RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+ RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
+ else
+ RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));
+
+ RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) |
+ R600_S0_Y(0x4) |
+ R600_S1_X(0x4) |
+ R600_S1_Y(0xc)));
+ RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) |
+ R600_S0_Y(0xe) |
+ R600_S1_X(0x2) |
+ R600_S1_Y(0x2) |
+ R600_S2_X(0xa) |
+ R600_S2_Y(0x6) |
+ R600_S3_X(0x6) |
+ R600_S3_Y(0xa)));
+ RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) |
+ R600_S0_Y(0xb) |
+ R600_S1_X(0x4) |
+ R600_S1_Y(0xc) |
+ R600_S2_X(0x1) |
+ R600_S2_Y(0x6) |
+ R600_S3_X(0xa) |
+ R600_S3_Y(0xe)));
+ RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) |
+ R600_S4_Y(0x1) |
+ R600_S5_X(0x0) |
+ R600_S5_Y(0x0) |
+ R600_S6_X(0xb) |
+ R600_S6_Y(0x4) |
+ R600_S7_X(0x7) |
+ R600_S7_Y(0x8)));
+
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_R600:
+ case CHIP_RV630:
+ case CHIP_RV635:
+ gs_prim_buffer_depth = 0;
+ break;
+ case CHIP_RV610:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV620:
+ gs_prim_buffer_depth = 32;
+ break;
+ case CHIP_RV670:
+ gs_prim_buffer_depth = 128;
+ break;
+ default:
+ break;
+ }
+
+ num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+ vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+ /* Max value for this is 256 */
+ if (vgt_gs_per_es > 256)
+ vgt_gs_per_es = 256;
+
+ RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+ RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+ RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+ RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+
+ /* more default values. 2D/3D driver should adjust as needed */
+ RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+ RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+ RADEON_WRITE(R600_SX_MISC, 0);
+ RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+ RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+ RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+ RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+ RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+ RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+ /* clear render buffer base addresses */
+ RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV610:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV620:
+ tc_cntl = R600_TC_L2_SIZE(8);
+ break;
+ case CHIP_RV630:
+ case CHIP_RV635:
+ tc_cntl = R600_TC_L2_SIZE(4);
+ break;
+ case CHIP_R600:
+ tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT;
+ break;
+ default:
+ tc_cntl = R600_TC_L2_SIZE(0);
+ break;
+ }
+
+ RADEON_WRITE(R600_TC_CNTL, tc_cntl);
+
+ hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+ RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ arb_pop = RADEON_READ(R600_ARB_POP);
+ arb_pop |= R600_ENABLE_TC128;
+ RADEON_WRITE(R600_ARB_POP, arb_pop);
+
+ RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+ RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+ R600_NUM_CLIP_SEQ(3)));
+ RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095));
+}
+
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+ u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask;
+ u32 enabled_backends_count;
+ u32 cur_pipe;
+ u32 swizzle_pipe[R7XX_MAX_PIPES];
+ u32 cur_backend;
+ u32 i;
+ bool force_no_swizzle;
+
+ if (num_tile_pipes > R7XX_MAX_PIPES)
+ num_tile_pipes = R7XX_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > R7XX_MAX_BACKENDS)
+ num_backends = R7XX_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ enabled_backends_mask = 0;
+ enabled_backends_count = 0;
+ for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
+ switch (num_tile_pipes) {
+ case 1:
+ swizzle_pipe[0] = 0;
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 3:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
+ break;
+ case 5:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
+ break;
+ case 7:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
+ break;
+ }
+
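+	/* same 2-bit-per-pipe packing as the r6xx variant above */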
+ cur_backend = 0;
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+
+ backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+ cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
+
+static void r700_gfx_init(struct drm_device *dev,
+ drm_radeon_private_t *dev_priv)
+{
+ int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 db_debug3;
+ u32 num_gs_verts_per_thread;
+ u32 vgt_gs_per_es;
+ u32 gs_prim_buffer_depth = 0;
+ u32 sq_ms_fifo_sizes;
+ u32 sq_config;
+ u32 sq_thread_resource_mgmt;
+ u32 hdp_host_path_cntl;
+ u32 sq_dyn_gpr_size_simd_ab_0;
+ u32 backend_map;
+ u32 gb_tiling_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
+ u32 mc_arb_ramcfg;
+ u32 db_debug4;
+
+ /* setup chip specs */
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ dev_priv->r600_max_pipes = 4;
+ dev_priv->r600_max_tile_pipes = 8;
+ dev_priv->r600_max_simds = 10;
+ dev_priv->r600_max_backends = 4;
+ dev_priv->r600_max_gprs = 256;
+ dev_priv->r600_max_threads = 248;
+ dev_priv->r600_max_stack_entries = 512;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16 * 2;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 112;
+ dev_priv->r600_sq_num_cf_insts = 2;
+
+ dev_priv->r700_sx_num_of_sets = 7;
+ dev_priv->r700_sc_prim_fifo_size = 0xF9;
+ dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+ dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+ break;
+ case CHIP_RV730:
+ dev_priv->r600_max_pipes = 2;
+ dev_priv->r600_max_tile_pipes = 4;
+ dev_priv->r600_max_simds = 8;
+ dev_priv->r600_max_backends = 2;
+ dev_priv->r600_max_gprs = 128;
+ dev_priv->r600_max_threads = 248;
+ dev_priv->r600_max_stack_entries = 256;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16 * 2;
+ dev_priv->r600_sx_max_export_size = 256;
+ dev_priv->r600_sx_max_export_pos_size = 32;
+ dev_priv->r600_sx_max_export_smx_size = 224;
+ dev_priv->r600_sq_num_cf_insts = 2;
+
+ dev_priv->r700_sx_num_of_sets = 7;
+ dev_priv->r700_sc_prim_fifo_size = 0xf9;
+ dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+ dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+ if (dev_priv->r600_sx_max_export_pos_size > 16) {
+ dev_priv->r600_sx_max_export_pos_size -= 16;
+ dev_priv->r600_sx_max_export_smx_size += 16;
+ }
+ break;
+ case CHIP_RV710:
+ dev_priv->r600_max_pipes = 2;
+ dev_priv->r600_max_tile_pipes = 2;
+ dev_priv->r600_max_simds = 2;
+ dev_priv->r600_max_backends = 1;
+ dev_priv->r600_max_gprs = 256;
+ dev_priv->r600_max_threads = 192;
+ dev_priv->r600_max_stack_entries = 256;
+ dev_priv->r600_max_hw_contexts = 4;
+ dev_priv->r600_max_gs_threads = 8 * 2;
+ dev_priv->r600_sx_max_export_size = 128;
+ dev_priv->r600_sx_max_export_pos_size = 16;
+ dev_priv->r600_sx_max_export_smx_size = 112;
+ dev_priv->r600_sq_num_cf_insts = 1;
+
+ dev_priv->r700_sx_num_of_sets = 7;
+ dev_priv->r700_sc_prim_fifo_size = 0x40;
+ dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+ dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+ break;
+ case CHIP_RV740:
+ dev_priv->r600_max_pipes = 4;
+ dev_priv->r600_max_tile_pipes = 4;
+ dev_priv->r600_max_simds = 8;
+ dev_priv->r600_max_backends = 4;
+ dev_priv->r600_max_gprs = 256;
+ dev_priv->r600_max_threads = 248;
+ dev_priv->r600_max_stack_entries = 512;
+ dev_priv->r600_max_hw_contexts = 8;
+ dev_priv->r600_max_gs_threads = 16 * 2;
+ dev_priv->r600_sx_max_export_size = 256;
+ dev_priv->r600_sx_max_export_pos_size = 32;
+ dev_priv->r600_sx_max_export_smx_size = 224;
+ dev_priv->r600_sq_num_cf_insts = 2;
+
+ dev_priv->r700_sx_num_of_sets = 7;
+ dev_priv->r700_sc_prim_fifo_size = 0x100;
+ dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+ dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+
+ if (dev_priv->r600_sx_max_export_pos_size > 16) {
+ dev_priv->r600_sx_max_export_pos_size -= 16;
+ dev_priv->r600_sx_max_export_smx_size += 16;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Initialize HDP */
+ j = 0;
+ for (i = 0; i < 32; i++) {
+ RADEON_WRITE((0x2c14 + j), 0x00000000);
+ RADEON_WRITE((0x2c18 + j), 0x00000000);
+ RADEON_WRITE((0x2c1c + j), 0x00000000);
+ RADEON_WRITE((0x2c20 + j), 0x00000000);
+ RADEON_WRITE((0x2c24 + j), 0x00000000);
+ j += 0x18;
+ }
+
+ RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+ /* setup tiling, simd, pipe config */
+ mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG);
+
+ switch (dev_priv->r600_max_tile_pipes) {
+ case 1:
+ gb_tiling_config |= R600_PIPE_TILING(0);
+ break;
+ case 2:
+ gb_tiling_config |= R600_PIPE_TILING(1);
+ break;
+ case 4:
+ gb_tiling_config |= R600_PIPE_TILING(2);
+ break;
+ case 8:
+ gb_tiling_config |= R600_PIPE_TILING(3);
+ break;
+ default:
+ break;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
+ gb_tiling_config |= R600_BANK_TILING(1);
+ else
+ gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK);
+
+ gb_tiling_config |= R600_GROUP_SIZE(0);
+
+ if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) {
+ gb_tiling_config |= R600_ROW_TILING(3);
+ gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+ } else {
+ gb_tiling_config |=
+ R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+ gb_tiling_config |=
+ R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+ }
+
+ gb_tiling_config |= R600_BANK_SWAPS(1);
+
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+ dev_priv->r600_max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+ RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
+ RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
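+	/* same tiling-parameter decode as in r600_gfx_init() above */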
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
+
+ RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
+ RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
+ RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
+ RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
+
+ num_qd_pipes =
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+ RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+ RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+ /* set HW defaults for 3D engine */
+ RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+ R600_ROQ_IB2_START(0x2b)));
+
+ RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
+
+ ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+ RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
+
+ sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
+ sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
+ RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff);
+ smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
+ RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+ RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+ R700_GS_FLUSH_CTL(4) |
+ R700_ACK_FLUSH_CTL(3) |
+ R700_SYNC_FLUSH_CTL));
+
+ db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+ db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
+ db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
+ db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
+ RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
+ }
+
+ RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) |
+ R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) |
+ R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1)));
+
+ RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) |
+ R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) |
+ R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize)));
+
+ RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+ RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1);
+
+ RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+
+ RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4));
+
+ RADEON_WRITE(R600_CP_PERFMON_CNTL, 0);
+
+ sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) |
+ R600_DONE_FIFO_HIWATER(0xe0) |
+ R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+ break;
+ case CHIP_RV740:
+ default:
+ sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
+ break;
+ }
+ RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
+ */
+ sq_config = RADEON_READ(R600_SQ_CONFIG);
+ sq_config &= ~(R600_PS_PRIO(3) |
+ R600_VS_PRIO(3) |
+ R600_GS_PRIO(3) |
+ R600_ES_PRIO(3));
+ sq_config |= (R600_DX9_CONSTS |
+ R600_VC_ENABLE |
+ R600_EXPORT_SRC_C |
+ R600_PS_PRIO(0) |
+ R600_VS_PRIO(1) |
+ R600_GS_PRIO(2) |
+ R600_ES_PRIO(3));
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+ /* no vertex cache */
+ sq_config &= ~R600_VC_ENABLE;
+
+ RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+
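+	/* statically partition the GPR file: 24/64ths each to PS and VS,
+	 * half of one such share as clause temporaries, and 7/64ths each
+	 * to GS and ES */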
+ RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+ R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+ R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2)));
+
+ RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) |
+ R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64)));
+
+ sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) |
+ R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) |
+ R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8));
+ if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads)
+ sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads);
+ else
+ sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8);
+ RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+ RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+ R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+ RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+ R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+ sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) |
+ R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) |
+ R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) |
+ R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64));
+
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+ RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+ RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) |
+ R700_FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+ RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) |
+ R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+ else
+ RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) |
+ R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV740:
+ gs_prim_buffer_depth = 384;
+ break;
+ case CHIP_RV710:
+ gs_prim_buffer_depth = 128;
+ break;
+ default:
+ break;
+ }
+
+ num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+ vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+ /* Max value for this is 256 */
+ if (vgt_gs_per_es > 256)
+ vgt_gs_per_es = 256;
+
+ RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+ RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+ RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+
+ /* more default values. 2D/3D driver should adjust as needed */
+ RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+ RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+ RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+ RADEON_WRITE(R600_SX_MISC, 0);
+ RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+ RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa);
+ RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+ RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff);
+ RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+ RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+ RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+ RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+ /* clear render buffer base addresses */
+ RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+ RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+ RADEON_WRITE(R700_TCP_CNTL, 0);
+
+ hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+ RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+ RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+ R600_NUM_CLIP_SEQ(3)));
+
+}
+
+static void r600_cp_init_ring_buffer(struct drm_device *dev,
+ drm_radeon_private_t *dev_priv,
+ struct drm_file *file_priv)
+{
+ struct drm_radeon_master_private *master_priv;
+ u32 ring_start;
+ u64 rptr_addr;
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+ r700_gfx_init(dev, dev_priv);
+ else
+ r600_gfx_init(dev, dev_priv);
+
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ RADEON_READ(R600_GRBM_SOFT_RESET);
+ mdelay(15);
+ RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+ /* Set ring buffer size */
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#else
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ RADEON_RB_NO_UPDATE |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#endif
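+	/* the low bits of CP_RB_CNTL hold log2 of the ring size in
+	 * quadwords (size_l2qw is drm_order(ring_size / 8), set up in
+	 * r600_do_init_cp); e.g. a 1 MiB ring is 131072 QW, so
+	 * size_l2qw = 17. rptr_update_l2qw uses the same encoding,
+	 * shifted into bits 8 and up.
+	 */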
+
+ RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
+
+ /* Set the write pointer delay */
+ RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#else
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#endif
+
+ /* Initialize the ring buffer's read and write pointers */
+ RADEON_WRITE(R600_CP_RB_RPTR_WR, 0);
+ RADEON_WRITE(R600_CP_RB_WPTR, 0);
+ SET_RING_HEAD(dev_priv, 0);
+ dev_priv->ring.tail = 0;
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ rptr_addr = dev_priv->ring_rptr->offset
+ - dev->agp->base +
+ dev_priv->gart_vm_start;
+ } else
+#endif
+ {
+ rptr_addr = dev_priv->ring_rptr->offset
+ - ((unsigned long) dev->sg->virtual)
+ + dev_priv->gart_vm_start;
+ }
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
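+	/* the rptr address is split across two registers: the dword-aligned
+	 * low 32 bits (hence the 0xfffffffc mask) plus the upper 32 bits
+	 * in the _HI register
+	 */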
+
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ RADEON_BUF_SWAP_32BIT |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#else
+ RADEON_WRITE(R600_CP_RB_CNTL,
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#endif
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* XXX */
+ radeon_write_agp_base(dev_priv, dev->agp->base);
+
+ /* XXX */
+ radeon_write_agp_location(dev_priv,
+ (((dev_priv->gart_vm_start - 1 +
+ dev_priv->gart_size) & 0xffff0000) |
+ (dev_priv->gart_vm_start >> 16)));
+
+ ring_start = (dev_priv->cp_ring->offset
+ - dev->agp->base
+ + dev_priv->gart_vm_start);
+ } else
+#endif
+ ring_start = (dev_priv->cp_ring->offset
+ - (unsigned long)dev->sg->virtual
+ + dev_priv->gart_vm_start);
+
+ RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);
+
+ RADEON_WRITE(R600_CP_ME_CNTL, 0xff);
+
+ RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28));
+
+ /* Initialize the scratch register pointer. This will cause
+ * the scratch register values to be written out to memory
+ * whenever they are updated.
+ *
+ * We simply put this behind the ring read pointer, this works
+ * with PCI GART as well as (whatever kind of) AGP GART
+ */
+ {
+ u64 scratch_addr;
+
+ scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
+ scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
+ scratch_addr += R600_SCRATCH_REG_OFFSET;
+ scratch_addr >>= 8;
+ scratch_addr &= 0xffffffff;
+
+ RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr);
+ }
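+	/* SCRATCH_ADDR takes a 256-byte-aligned address (hence the >> 8),
+	 * pointing R600_SCRATCH_REG_OFFSET bytes past the ring rptr
+	 * location read back above
+	 */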
+
+ RADEON_WRITE(R600_SCRATCH_UMSK, 0x7);
+
+ /* Turn on bus mastering */
+ radeon_enable_bm(dev_priv);
+
+ radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
+ RADEON_WRITE(R600_LAST_FRAME_REG, 0);
+
+ radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+ RADEON_WRITE(R600_LAST_DISPATCH_REG, 0);
+
+ radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
+ RADEON_WRITE(R600_LAST_CLEAR_REG, 0);
+
+ /* reset sarea copies of these */
+ master_priv = file_priv->master->driver_priv;
+ if (master_priv->sarea_priv) {
+ master_priv->sarea_priv->last_frame = 0;
+ master_priv->sarea_priv->last_dispatch = 0;
+ master_priv->sarea_priv->last_clear = 0;
+ }
+
+ r600_do_wait_for_idle(dev_priv);
+
+}
+
+int r600_do_cleanup_cp(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ /* Make sure interrupts are disabled here because the uninstall ioctl
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ if (dev_priv->cp_ring != NULL) {
+ drm_core_ioremapfree(dev_priv->cp_ring, dev);
+ dev_priv->cp_ring = NULL;
+ }
+ if (dev_priv->ring_rptr != NULL) {
+ drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+ dev_priv->ring_rptr = NULL;
+ }
+ if (dev->agp_buffer_map != NULL) {
+ drm_core_ioremapfree(dev->agp_buffer_map, dev);
+ dev->agp_buffer_map = NULL;
+ }
+ } else
+#endif
+ {
+
+ if (dev_priv->gart_info.bus_addr)
+ r600_page_table_cleanup(dev, &dev_priv->gart_info);
+
+ if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
+ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+ dev_priv->gart_info.addr = NULL;
+ }
+ }
+ /* only clear to the start of flags */
+ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+ return 0;
+}
+
+int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+ struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+ DRM_DEBUG("\n");
+
+ mutex_init(&dev_priv->cs_mutex);
+ r600_cs_legacy_init();
+	/* if we require the new memory map but don't have it, fail */
+ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+ DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+ DRM_DEBUG("Forcing AGP card to PCI mode\n");
+ dev_priv->flags &= ~RADEON_IS_AGP;
+ /* The writeback test succeeds, but when writeback is enabled,
+		 * the ring buffer read ptr update fails after the first 128 bytes.
+ */
+ radeon_no_wb = 1;
+ } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+ && !init->is_pci) {
+ DRM_DEBUG("Restoring AGP flag\n");
+ dev_priv->flags |= RADEON_IS_AGP;
+ }
+
+ dev_priv->usec_timeout = init->usec_timeout;
+ if (dev_priv->usec_timeout < 1 ||
+ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+ DRM_DEBUG("TIMEOUT problem!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ /* Enable vblank on CRTC1 for older X servers
+ */
+ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+ dev_priv->do_boxes = 0;
+ dev_priv->cp_mode = init->cp_mode;
+
+ /* We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+ * read pointer.
+ */
+ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+ (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+ DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ switch (init->fb_bpp) {
+ case 16:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+ break;
+ case 32:
+ default:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+ break;
+ }
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+ dev_priv->ring_offset = init->ring_offset;
+ dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+ dev_priv->buffers_offset = init->buffers_offset;
+ dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+ master_priv->sarea = drm_getsarea(dev);
+ if (!master_priv->sarea) {
+ DRM_ERROR("could not find sarea!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+ if (!dev_priv->cp_ring) {
+ DRM_ERROR("could not find cp ring region!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+ if (!dev_priv->ring_rptr) {
+ DRM_ERROR("could not find ring read pointer!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ dev->agp_buffer_token = init->buffers_offset;
+ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+ if (!dev->agp_buffer_map) {
+ DRM_ERROR("could not find dma buffer region!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ if (init->gart_textures_offset) {
+ dev_priv->gart_textures =
+ drm_core_findmap(dev, init->gart_textures_offset);
+ if (!dev_priv->gart_textures) {
+ DRM_ERROR("could not find GART texture region!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ }
+
+#if __OS_HAS_AGP
+ /* XXX */
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+ drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+ drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+ if (!dev_priv->cp_ring->handle ||
+ !dev_priv->ring_rptr->handle ||
+ !dev->agp_buffer_map->handle) {
+ DRM_ERROR("could not find ioremap agp regions!\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ } else
+#endif
+ {
+ dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
+ dev_priv->ring_rptr->handle =
+ (void *)(unsigned long)dev_priv->ring_rptr->offset;
+ dev->agp_buffer_map->handle =
+ (void *)(unsigned long)dev->agp_buffer_map->offset;
+
+ DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+ dev_priv->cp_ring->handle);
+ DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+ dev_priv->ring_rptr->handle);
+ DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+ dev->agp_buffer_map->handle);
+ }
+
+ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
+ dev_priv->fb_size =
+ (((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000)
+ - dev_priv->fb_location;
+
+ dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+ ((dev_priv->front_offset
+ + dev_priv->fb_location) >> 10));
+
+ dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+ ((dev_priv->back_offset
+ + dev_priv->fb_location) >> 10));
+
+ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+ ((dev_priv->depth_offset
+ + dev_priv->fb_location) >> 10));
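+	/* each *_pitch_offset packs the pitch in 64-unit granularity into
+	 * the top 10 bits (the << 22) and the 1 KiB-aligned card address of
+	 * the surface (offset + fb_location, >> 10) into the low 22 bits
+	 */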
+
+ dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+ if (dev_priv->new_memmap) {
+ u32 base = 0;
+
+ DRM_INFO("Setting GART location based on new memory map\n");
+
+ /* If using AGP, try to locate the AGP aperture at the same
+ * location in the card and on the bus, though we have to
+ * align it down.
+ */
+#if __OS_HAS_AGP
+ /* XXX */
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ base = dev->agp->base;
+ /* Check if valid */
+ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+ DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+ dev->agp->base);
+ base = 0;
+ }
+ }
+#endif
+ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+ if (base == 0) {
+ base = dev_priv->fb_location + dev_priv->fb_size;
+ if (base < dev_priv->fb_location ||
+ ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+ base = dev_priv->fb_location
+ - dev_priv->gart_size;
+ }
+ dev_priv->gart_vm_start = base & 0xffc00000u;
+ if (dev_priv->gart_vm_start != base)
+ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+ base, dev_priv->gart_vm_start);
+ }
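+	/* i.e. prefer the AGP base, else place the GART right after VRAM
+	 * (or just below it on address wrap), aligned down to a 4 MB
+	 * boundary via the 0xffc00000 mask
+	 */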
+
+#if __OS_HAS_AGP
+ /* XXX */
+ if (dev_priv->flags & RADEON_IS_AGP)
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+ - dev->agp->base
+ + dev_priv->gart_vm_start);
+ else
+#endif
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+ - (unsigned long)dev->sg->virtual
+ + dev_priv->gart_vm_start);
+
+ DRM_DEBUG("fb 0x%08x size %d\n",
+ (unsigned int) dev_priv->fb_location,
+ (unsigned int) dev_priv->fb_size);
+ DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+ DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n",
+ (unsigned int) dev_priv->gart_vm_start);
+ DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
+ dev_priv->gart_buffers_offset);
+
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ + init->ring_size / sizeof(u32));
+ dev_priv->ring.size = init->ring_size;
+ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+ dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+
+ dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+ dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+
+ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
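+	/* tail_mask wraps ring indices; the ring is indexed in dwords, so
+	 * e.g. a 1 MiB ring has 262144 entries and a tail_mask of 0x3ffff
+	 */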
+
+ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* XXX turn off pcie gart */
+ } else
+#endif
+ {
+ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* make sure we have a GART offset set from userspace */
+ if (!dev_priv->pcigart_offset_set) {
+ DRM_ERROR("Need gart offset from userspace\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset);
+
+ dev_priv->gart_info.bus_addr =
+ dev_priv->pcigart_offset + dev_priv->fb_location;
+ dev_priv->gart_info.mapping.offset =
+ dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+ dev_priv->gart_info.mapping.size =
+ dev_priv->gart_info.table_size;
+
+ drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+ if (!dev_priv->gart_info.mapping.handle) {
+ DRM_ERROR("ioremap failed.\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->gart_info.addr =
+ dev_priv->gart_info.mapping.handle;
+
+ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+ dev_priv->gart_info.addr,
+ dev_priv->pcigart_offset);
+
+ if (!r600_page_table_init(dev)) {
+ DRM_ERROR("Failed to init GART table\n");
+ r600_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+ r700_vm_init(dev);
+ else
+ r600_vm_init(dev);
+ }
+
+ if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
+ int err = r600_cp_init_microcode(dev_priv);
+ if (err) {
+ DRM_ERROR("Failed to load firmware!\n");
+ r600_do_cleanup_cp(dev);
+ return err;
+ }
+ }
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770))
+ r700_cp_load_microcode(dev_priv);
+ else
+ r600_cp_load_microcode(dev_priv);
+
+ r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+ dev_priv->last_buf = 0;
+
+ r600_do_engine_reset(dev);
+ r600_test_writeback(dev_priv);
+
+ return 0;
+}
+
+int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) {
+ r700_vm_init(dev);
+ r700_cp_load_microcode(dev_priv);
+ } else {
+ r600_vm_init(dev);
+ r600_cp_load_microcode(dev_priv);
+ }
+ r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+ r600_do_engine_reset(dev);
+
+ return 0;
+}
+
+/* Wait for the CP to go idle.
+ */
+int r600_do_cp_idle(drm_radeon_private_t *dev_priv)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+ OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+ OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return r600_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+void r600_do_cp_start(drm_radeon_private_t *dev_priv)
+{
+ u32 cp_me;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(7);
+ OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5));
+ OUT_RING(0x00000001);
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770))
+ OUT_RING(0x00000003);
+ else
+ OUT_RING(0x00000000);
+ OUT_RING((dev_priv->r600_max_hw_contexts - 1));
+ OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1));
+ OUT_RING(0x00000000);
+ OUT_RING(0x00000000);
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ /* set the mux and reset the halt bit */
+ cp_me = 0xff;
+ RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+ dev_priv->cp_running = 1;
+
+}
+
+void r600_do_cp_reset(drm_radeon_private_t *dev_priv)
+{
+ u32 cur_read_ptr;
+ DRM_DEBUG("\n");
+
+ cur_read_ptr = RADEON_READ(R600_CP_RB_RPTR);
+ RADEON_WRITE(R600_CP_RB_WPTR, cur_read_ptr);
+ SET_RING_HEAD(dev_priv, cur_read_ptr);
+ dev_priv->ring.tail = cur_read_ptr;
+}
+
+void r600_do_cp_stop(drm_radeon_private_t *dev_priv)
+{
+ uint32_t cp_me;
+
+ DRM_DEBUG("\n");
+
+ cp_me = 0xff | R600_CP_ME_HALT;
+
+ RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+ dev_priv->cp_running = 0;
+}
+
+int r600_cp_dispatch_indirect(struct drm_device *dev,
+ struct drm_buf *buf, int start, int end)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ if (start != end) {
+ unsigned long offset = (dev_priv->gart_buffers_offset
+ + buf->offset + start);
+ int dwords = (end - start + 3) / sizeof(u32);
+
+ DRM_DEBUG("dwords:%d\n", dwords);
+ DRM_DEBUG("offset 0x%lx\n", offset);
+
+ /* Indirect buffer data must be a multiple of 16 dwords.
+ * pad the data with a Type-2 CP packet.
+ */
+ while (dwords & 0xf) {
+ u32 *data = (u32 *)
+ ((char *)dev->agp_buffer_map->handle
+ + buf->offset + start);
+ data[dwords++] = RADEON_CP_PACKET2;
+ }
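+		/* e.g. a 9-dword buffer picks up 7 type-2 NOP dwords here,
+		 * padding it to the required 16-dword multiple
+		 */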
+
+ /* Fire off the indirect buffer */
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2));
+ OUT_RING((offset & 0xfffffffc));
+ OUT_RING((upper_32_bits(offset) & 0xff));
+ OUT_RING(dwords);
+ ADVANCE_RING();
+ }
+
+ return 0;
+}
+
+void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_master *master = file_priv->master;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
+ int i, cpp, src_pitch, dst_pitch;
+ uint64_t src, dst;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
+ cpp = 4;
+ else
+ cpp = 2;
+
+ if (sarea_priv->pfCurrentPage == 0) {
+ src_pitch = dev_priv->back_pitch;
+ dst_pitch = dev_priv->front_pitch;
+ src = dev_priv->back_offset + dev_priv->fb_location;
+ dst = dev_priv->front_offset + dev_priv->fb_location;
+ } else {
+ src_pitch = dev_priv->front_pitch;
+ dst_pitch = dev_priv->back_pitch;
+ src = dev_priv->front_offset + dev_priv->fb_location;
+ dst = dev_priv->back_offset + dev_priv->fb_location;
+ }
+
+ if (r600_prepare_blit_copy(dev, file_priv)) {
+ DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+ return;
+ }
+ for (i = 0; i < nbox; i++) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+ r600_blit_swap(dev,
+ src, dst,
+ x, y, x, y, w, h,
+ src_pitch, dst_pitch, cpp);
+ }
+ r600_done_blit_copy(dev);
+
+ /* Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+ sarea_priv->last_frame++;
+
+ BEGIN_RING(3);
+ R600_FRAME_AGE(sarea_priv->last_frame);
+ ADVANCE_RING();
+}
+
+int r600_cp_dispatch_texture(struct drm_device *dev,
+ struct drm_file *file_priv,
+ drm_radeon_texture_t *tex,
+ drm_radeon_tex_image_t *image)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_buf *buf;
+ u32 *buffer;
+ const u8 __user *data;
+ int size, pass_size;
+ u64 src_offset, dst_offset;
+
+ if (!radeon_check_offset(dev_priv, tex->offset)) {
+ DRM_ERROR("Invalid destination offset\n");
+ return -EINVAL;
+ }
+
+ /* this might fail for zero-sized uploads - are those illegal? */
+ if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
+ DRM_ERROR("Invalid final destination offset\n");
+ return -EINVAL;
+ }
+
+ size = tex->height * tex->pitch;
+
+ if (size == 0)
+ return 0;
+
+ dst_offset = tex->offset;
+
+ if (r600_prepare_blit_copy(dev, file_priv)) {
+ DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+ return -EAGAIN;
+ }
+ do {
+ data = (const u8 __user *)image->data;
+ pass_size = size;
+
+ buf = radeon_freelist_get(dev);
+ if (!buf) {
+ DRM_DEBUG("EAGAIN\n");
+ if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ return -EFAULT;
+ return -EAGAIN;
+ }
+
+ if (pass_size > buf->total)
+ pass_size = buf->total;
+
+ /* Dispatch the indirect buffer.
+ */
+ buffer =
+ (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+ if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+ DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
+ return -EFAULT;
+ }
+
+ buf->file_priv = file_priv;
+ buf->used = pass_size;
+ src_offset = dev_priv->gart_buffers_offset + buf->offset;
+
+ r600_blit_copy(dev, src_offset, dst_offset, pass_size);
+
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+ /* Update the input parameters for next time */
+ image->data = (const u8 __user *)image->data + pass_size;
+ dst_offset += pass_size;
+ size -= pass_size;
+ } while (size > 0);
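+	/* the upload is thus chunked: each pass stages at most buf->total
+	 * bytes in a GART buffer and blits them to dst_offset, advancing
+	 * the user pointer and offset until the whole image is transferred
+	 */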
+ r600_done_blit_copy(dev);
+
+ return 0;
+}
+
+/*
+ * Legacy cs ioctl
+ */
+static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
+{
+	/* FIXME: check if wrap affects last reported wrap & sequence */
+ radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
+ if (!radeon->cs_id_scnt) {
+ /* increment wrap counter */
+ radeon->cs_id_wcnt += 0x01000000;
+		/* valid sequence counters start at 1 */
+ radeon->cs_id_scnt = 1;
+ }
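+	/* the returned id thus carries the sequence count in its low
+	 * 24 bits (wrapping back to 1, never 0) and a wrap counter in
+	 * the top 8 bits
+	 */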
+ return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
+}
+
+static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
+{
+ RING_LOCALS;
+
+ *id = radeon_cs_id_get(dev_priv);
+
+ /* SCRATCH 2 */
+ BEGIN_RING(3);
+ R600_CLEAR_AGE(*id);
+ ADVANCE_RING();
+ COMMIT_RING();
+}
+
+static int r600_ib_get(struct drm_device *dev,
+ struct drm_file *fpriv,
+ struct drm_buf **buffer)
+{
+ struct drm_buf *buf;
+
+ *buffer = NULL;
+ buf = radeon_freelist_get(dev);
+ if (!buf) {
+ return -EBUSY;
+ }
+ buf->file_priv = fpriv;
+ *buffer = buf;
+ return 0;
+}
+
+static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
+ struct drm_file *fpriv, int l, int r)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (buf) {
+ if (!r)
+ r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
+ radeon_cp_discard_buffer(dev, fpriv->master, buf);
+ COMMIT_RING();
+ }
+}
+
+int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
+{
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+ struct drm_radeon_cs *cs = data;
+ struct drm_buf *buf;
+ unsigned family;
+ int l, r = 0;
+ u32 *ib, cs_id = 0;
+
+ if (dev_priv == NULL) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+ family = dev_priv->flags & RADEON_FAMILY_MASK;
+ if (family < CHIP_R600) {
+ DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
+ return -EINVAL;
+ }
+ mutex_lock(&dev_priv->cs_mutex);
+ /* get ib */
+ r = r600_ib_get(dev, fpriv, &buf);
+ if (r) {
+ DRM_ERROR("ib_get failed\n");
+ goto out;
+ }
+ ib = dev->agp_buffer_map->handle + buf->offset;
+ /* now parse command stream */
+ r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
+ if (r) {
+ goto out;
+ }
+
+out:
+ r600_ib_free(dev, buf, fpriv, l, r);
+ /* emit cs id sequence */
+ r600_cs_id_emit(dev_priv, &cs_id);
+ cs->cs_id = cs_id;
+ mutex_unlock(&dev_priv->cs_mutex);
+ return r;
+}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+
+ *npipes = dev_priv->r600_npipes;
+ *nbanks = dev_priv->r600_nbanks;
+ *group_size = dev_priv->r600_group_size;
+}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
new file mode 100644
index 0000000..2dc3b57
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -0,0 +1,2621 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "r600d.h"
+#include "r600_reg_safe.h"
+
+static int r600_nomm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
+struct r600_cs_track {
+	/* configuration we mirror so that we use the same code between kms/ums */
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* value we track */
+ u32 sq_config;
+ u32 log_nsamples;
+ u32 nsamples;
+ u32 cb_color_base_last[8];
+ struct radeon_bo *cb_color_bo[8];
+ u64 cb_color_bo_mc[8];
+ u64 cb_color_bo_offset[8];
+ struct radeon_bo *cb_color_frag_bo[8];
+ u64 cb_color_frag_offset[8];
+ struct radeon_bo *cb_color_tile_bo[8];
+ u64 cb_color_tile_offset[8];
+ u32 cb_color_mask[8];
+ u32 cb_color_info[8];
+ u32 cb_color_view[8];
+ u32 cb_color_size_idx[8]; /* unused */
+ u32 cb_target_mask;
+ u32 cb_shader_mask; /* unused */
+ bool is_resolve;
+ u32 cb_color_size[8];
+ u32 vgt_strmout_en;
+ u32 vgt_strmout_buffer_en;
+ struct radeon_bo *vgt_strmout_bo[4];
+ u64 vgt_strmout_bo_mc[4]; /* unused */
+ u32 vgt_strmout_bo_offset[4];
+ u32 vgt_strmout_size[4];
+ u32 db_depth_control;
+ u32 db_depth_info;
+ u32 db_depth_size_idx;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_offset;
+ struct radeon_bo *db_bo;
+ u64 db_bo_mc;
+ bool sx_misc_kill_all_prims;
+ bool cb_dirty;
+ bool db_dirty;
+ bool streamout_dirty;
+ struct radeon_bo *htile_bo;
+ u64 htile_offset;
+ u32 htile_surface;
+};
+
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
+
+struct gpu_formats {
+ unsigned blockwidth;
+ unsigned blockheight;
+ unsigned blocksize;
+ unsigned valid_color;
+ enum radeon_family min_family;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+ /* 8 bit */
+ FMT_8_BIT(V_038004_COLOR_8, 1),
+ FMT_8_BIT(V_038004_COLOR_4_4, 1),
+ FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+ FMT_8_BIT(V_038004_FMT_1, 0),
+
+ /* 16-bit */
+ FMT_16_BIT(V_038004_COLOR_16, 1),
+ FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+ FMT_16_BIT(V_038004_COLOR_8_8, 1),
+ FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+ FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+ FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+ /* 24-bit */
+ FMT_24_BIT(V_038004_FMT_8_8_8),
+
+ /* 32-bit */
+ FMT_32_BIT(V_038004_COLOR_32, 1),
+ FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+ FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+ FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+ FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+ /* 48-bit */
+ FMT_48_BIT(V_038004_FMT_16_16_16),
+ FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+ /* 64-bit */
+ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+ FMT_96_BIT(V_038004_FMT_32_32_32),
+ FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+ /* 128-bit */
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+ [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+ [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+ /* block compressed formats */
+ [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC5] = { 4, 4, 16, 0},
+ [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+ [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
+
+ /* The other Evergreen formats */
+ [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
+};
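+
+/* Reading the table: an entry { bw, bh, bs, vc } describes bw x bh pixel
+ * blocks of bs bytes, with vc set for formats valid as color targets
+ * (plus a minimum family for the newer-ASIC formats); e.g.
+ * V_038004_FMT_BC1 = { 4, 4, 8, 0 } is 4x4 blocks of 8 bytes each and
+ * not renderable.
+ */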
+
+bool r600_fmt_is_valid_color(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].valid_color)
+ return true;
+
+ return false;
+}
+
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (family < color_formats_table[format].min_family)
+ return false;
+
+ if (color_formats_table[format].blockwidth > 0)
+ return true;
+
+ return false;
+}
+
+int r600_fmt_get_blocksize(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ return color_formats_table[format].blocksize;
+}
+
+int r600_fmt_get_nblocksx(u32 format, u32 w)
+{
+ unsigned bw;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bw = color_formats_table[format].blockwidth;
+ if (bw == 0)
+ return 0;
+
+ return (w + bw - 1) / bw;
+}
+
+int r600_fmt_get_nblocksy(u32 format, u32 h)
+{
+ unsigned bh;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bh = color_formats_table[format].blockheight;
+ if (bh == 0)
+ return 0;
+
+ return (h + bh - 1) / bh;
+}
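+
+/* Together these give a surface's size in bytes, e.g. a 64x64 BC1
+ * texture: 16 x 16 blocks of 8 bytes = 2048 bytes.
+ */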
+
+struct array_mode_checker {
+ int array_mode;
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ u32 nsamples;
+ u32 blocksize;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+ u32 *pitch_align,
+ u32 *height_align,
+ u32 *depth_align,
+ u64 *base_align)
+{
+ u32 tile_width = 8;
+ u32 tile_height = 8;
+ u32 macro_tile_width = values->nbanks;
+ u32 macro_tile_height = values->npipes;
+ u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
+ u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+ switch (values->array_mode) {
+ case ARRAY_LINEAR_GENERAL:
+ /* technically tile_width/_height for pitch/height */
+ *pitch_align = 1; /* tile_width */
+ *height_align = 1; /* tile_height */
+ *depth_align = 1;
+ *base_align = 1;
+ break;
+ case ARRAY_LINEAR_ALIGNED:
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
+ *height_align = 1;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_1D_TILED_THIN1:
+ *pitch_align = max((u32)tile_width,
+ (u32)(values->group_size /
+ (tile_height * values->blocksize * values->nsamples)));
+ *height_align = tile_height;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_2D_TILED_THIN1:
+ *pitch_align = max((u32)macro_tile_width * tile_width,
+ (u32)((values->group_size * values->nbanks) /
+ (values->blocksize * values->nsamples * tile_width)));
+ *height_align = macro_tile_height * tile_height;
+ *depth_align = 1;
+ *base_align = max(macro_tile_bytes,
+ (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
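+
+/* Worked example for ARRAY_1D_TILED_THIN1 with a 256-byte group size,
+ * 4-byte blocks and 1 sample: pitch_align = max(8, 256 / (8 * 4 * 1)) = 8
+ * pixels, height_align = 8 rows, base_align = 256 bytes.
+ */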
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+ int i;
+
+ /* assume DX9 mode */
+ track->sq_config = DX9_CONSTS;
+ for (i = 0; i < 8; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_size[i] = 0;
+ track->cb_color_size_idx[i] = 0;
+ track->cb_color_info[i] = 0;
+ track->cb_color_view[i] = 0xFFFFFFFF;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_bo_mc[i] = 0xFFFFFFFF;
+ track->cb_color_frag_bo[i] = NULL;
+ track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+ track->cb_color_tile_bo[i] = NULL;
+ track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+ track->cb_color_mask[i] = 0xFFFFFFFF;
+ }
+ track->is_resolve = false;
+ track->nsamples = 16;
+ track->log_nsamples = 4;
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+ track->cb_dirty = true;
+ track->db_bo = NULL;
+ track->db_bo_mc = 0xFFFFFFFF;
+ /* assume the biggest format and that htile is enabled */
+ track->db_depth_info = 7 | (1 << 25);
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+ track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
+
+ for (i = 0; i < 4; i++) {
+ track->vgt_strmout_size[i] = 0;
+ track->vgt_strmout_bo[i] = NULL;
+ track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+ track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
+ }
+ track->streamout_dirty = true;
+ track->sx_misc_kill_all_prims = false;
+}
+
+static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ struct r600_cs_track *track = p->track;
+ u32 slice_tile_max, size, tmp;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ volatile u32 *ib = p->ib.ptr;
+ unsigned array_mode;
+ u32 format;
+	/* When resolve is used, the second colorbuffer always has 1 sample. */
+ unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
+
+ size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
+ format = G_0280A0_FORMAT(track->cb_color_info[i]);
+ if (!r600_fmt_is_valid_color(format)) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, format,
+ i, track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ /* pitch in pixels */
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
+ slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
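+	/* PITCH_TILE_MAX is in 8-pixel units and SLICE_TILE_MAX in 8x8-pixel
+	 * tiles (hence the * 8 and * 64), so height falls out as slice
+	 * pixels / pitch, clamped to the 8192 hw max
+	 */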
+ array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+ base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = nsamples;
+ array_check.blocksize = r600_fmt_get_blocksize(format);
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ break;
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ /* avoid breaking userspace */
+ if (height > 7)
+ height &= ~0x7;
+ break;
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
+
+ /* check offset */
+ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+ r600_fmt_get_blocksize(format) * nsamples;
+ switch (array_mode) {
+ default:
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ tmp += track->cb_color_view[i] & 0xFF;
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
+ break;
+ }
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+			/* The initial DDX does bad things with the CB size
+			 * occasionally: it rounds up height too far for slice
+			 * tile max but the BO is smaller. r600c,g also seem to
+			 * flush at bad times in some apps, resulting in bogus
+			 * values here. So for linear just allow anything, to
+			 * avoid breaking broken userspace.
+			 */
+ } else {
+ dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+ __func__, i, array_mode,
+ track->cb_color_bo_offset[i], tmp,
+ radeon_bo_size(track->cb_color_bo[i]),
+ pitch, height, r600_fmt_get_nblocksx(format, pitch),
+ r600_fmt_get_nblocksy(format, height),
+ r600_fmt_get_blocksize(format));
+ return -EINVAL;
+ }
+ }
+ /* limit max tile */
+ tmp = (height * pitch) >> 6;
+ if (tmp < slice_tile_max)
+ slice_tile_max = tmp;
+ tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
+ S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+ ib[track->cb_color_size_idx[i]] = tmp;
+
+ /* FMASK/CMASK */
+ switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ case V_0280A0_TILE_DISABLE:
+ break;
+ case V_0280A0_FRAG_ENABLE:
+ if (track->nsamples > 1) {
+ uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* the tile size is 8x8, but the size is in units of bits;
+			 * just multiply by 8 for bytes. */
+ uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+ if (bytes + track->cb_color_frag_offset[i] >
+ radeon_bo_size(track->cb_color_frag_bo[i])) {
+ dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+ "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, tile_max, bytes,
+ track->cb_color_frag_offset[i],
+ radeon_bo_size(track->cb_color_frag_bo[i]));
+ return -EINVAL;
+ }
+ }
+ /* fall through */
+ case V_0280A0_CLEAR_ENABLE:
+ {
+ uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels; one 8x8 tile has 4 bits, so
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+ uint32_t bytes = (block_max + 1) * 128;
+
+ if (bytes + track->cb_color_tile_offset[i] >
+ radeon_bo_size(track->cb_color_tile_bo[i])) {
+ dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+ "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, block_max, bytes,
+ track->cb_color_tile_offset[i],
+ radeon_bo_size(track->cb_color_tile_bo[i]));
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+ u32 height_align, pitch_align, depth_align;
+ u32 pitch = 8192;
+ u32 height = 8192;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+ volatile u32 *ib = p->ib.ptr;
+
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ size = radeon_bo_size(track->db_bo);
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.blocksize = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ /* don't break userspace */
+ height &= ~0x7;
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
+
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews * track->nsamples;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+
+ /* hyperz */
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ unsigned long size;
+ unsigned nbx, nby;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
+ return -EINVAL;
+ }
+
+ nbx = pitch;
+ nby = height;
+ if (G_028D24_LINEAR(track->htile_surface)) {
+ /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+			/* always assume 8x8 htiles */
+			/* alignment is htile align * 8; htile alignment varies
+			 * with the number of pipes, the tile width and nby
+			 */
+ switch (track->npipes) {
+ case 8:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 4:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 1:
+ /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+ /* compute number of htile */
+ nbx = nbx >> 3;
+ nby = nby >> 3;
+ /* size must be aligned on npipes * 2K boundary */
+ size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+ size += track->htile_offset;
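+		/* e.g. a 1024x512 depth buffer on 2 pipes (non-linear htile):
+		 * nbx = 1024/8 = 128, nby = 512/8 = 64 htiles at 4 bytes each
+		 * -> 32768 bytes, already a multiple of the 2 * 2K alignment
+		 */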
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ }
+
+ track->db_dirty = false;
+ return 0;
+}
+
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 tmp;
+ int r, i;
+
+ /* on legacy kernel we don't perform advanced check */
+ if (p->rdev == NULL)
+ return 0;
+
+ /* check streamout */
+ if (track->streamout_dirty && track->vgt_strmout_en) {
+ for (i = 0; i < 4; i++) {
+ if (track->vgt_strmout_buffer_en & (1 << i)) {
+ if (track->vgt_strmout_bo[i]) {
+ u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+ (u64)track->vgt_strmout_size[i];
+ if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+ DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
+ return -EINVAL;
+ }
+ } else {
+ dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ return -EINVAL;
+ }
+ }
+ }
+ track->streamout_dirty = false;
+ }
+
+ if (track->sx_misc_kill_all_prims)
+ return 0;
+
+	/* check that we have a cb for each enabled target; we don't check
+ * shader_mask because it seems mesa isn't always setting it :(
+ */
+ if (track->cb_dirty) {
+ tmp = track->cb_target_mask;
+
+ /* We must check both colorbuffers for RESOLVE. */
+ if (track->is_resolve) {
+ tmp |= 0xff;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
+ }
+ }
+ track->cb_dirty = false;
+ }
+
+ /* Check depth buffer */
+ if (track->db_dirty &&
+ G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+ (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control))) {
+ r = r600_cs_track_validate_db(p);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @parser: parser structure holding parsing context.
+ *
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation function.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+ static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+ AVIVO_D2MODE_VLINE_START_END};
+ static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+ AVIVO_D2MODE_VLINE_STATUS};
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+/**
+ * r600_cs_common_vline_parse() - common vline parser
+ * @parser: parser structure holding parsing context.
+ * @vline_start_end: table of vline_start_end registers
+ * @vline_status: table of vline_status registers
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case. This function is common for all ASICs that
+ * are R600 and newer. The parsing algorithm is the same, and only
+ * differs in which registers are used.
+ *
+ * Caller is the ASIC-specific function which passes the parser
+ * context and ASIC-specific register table
+ */
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, wait_reg_mem;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg, wait_reg_mem_info;
+ volatile uint32_t *ib;
+
+ ib = p->ib.ptr;
+
+ /* parse the WAIT_REG_MEM */
+ r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ if (r)
+ return r;
+
+ /* check its a WAIT_REG_MEM */
+ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ return -EINVAL;
+ }
+
+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+ /* bit 4 is reg (0) or mem (1) */
+ if (wait_reg_mem_info & 0x10) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ return -EINVAL;
+ }
+ /* bit 8 is me (0) or pfp (1) */
+ if (wait_reg_mem_info & 0x100) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+ return -EINVAL;
+ }
+ /* waiting for value to be equal */
+ if ((wait_reg_mem_info & 0x7) != 0x3) {
+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ return -EINVAL;
+ }
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ return -EINVAL;
+ }
+
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ return -EINVAL;
+ }
+
+ /* jump over the NOP */
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ if (r)
+ return r;
+
+ h_idx = p->idx - 2;
+ p->idx += wait_reg_mem.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+ reg = R600_CP_PACKET0_GET_REG(header);
+
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ ib[h_idx + 4] = PACKET2(0);
+ ib[h_idx + 5] = PACKET2(0);
+ ib[h_idx + 6] = PACKET2(0);
+ ib[h_idx + 7] = PACKET2(0);
+ ib[h_idx + 8] = PACKET2(0);
+ } else if (reg == vline_start_end[0]) {
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= vline_start_end[crtc_id] >> 2;
+ ib[h_idx] = header;
+ ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+ } else {
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int r600_packet0_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg)
+{
+ int r;
+
+ switch (reg) {
+ case AVIVO_D1MODE_VLINE_START_END:
+ r = r600_cs_packet_parse_vline(p);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ return r;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+ reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ unsigned reg, i;
+ unsigned idx;
+ int r;
+
+ idx = pkt->idx + 1;
+ reg = pkt->reg;
+ for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+ r = r600_packet0_check(p, pkt, idx, reg);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe,
+ * this function will test it against a list of registers needing
+ * special handling.
+ */
+static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 m, i, tmp, *ib;
+ int r;
+
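+	/* each u32 of r600_reg_safe_bm covers 32 registers (128 bytes of
+	 * register space, hence reg >> 7); a clear bit means the register
+	 * is always allowed, a set bit routes it to the switch below
+	 */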
+ i = (reg >> 7);
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib.ptr;
+ switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we need to understand it better before we can perform
+	 * a proper security check on it (Jerome)
+ */
+ case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+ case R_008C44_SQ_ESGS_RING_SIZE:
+ case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+ case R_008C54_SQ_ESTMP_RING_SIZE:
+ case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+ case R_008C74_SQ_FBUF_RING_SIZE:
+ case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+ case R_008C5C_SQ_GSTMP_RING_SIZE:
+ case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+ case R_008C4C_SQ_GSVS_RING_SIZE:
+ case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+ case R_008C6C_SQ_PSTMP_RING_SIZE:
+ case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+ case R_008C7C_SQ_REDUC_RING_SIZE:
+ case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+ case R_008C64_SQ_VSTMP_RING_SIZE:
+ case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+ /* get value to populate the IB, don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case SQ_CONFIG:
+ track->sq_config = radeon_get_ib_value(p, idx);
+ break;
+ case R_028800_DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case R_028010_DB_DEPTH_INFO:
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= C_028010_ARRAY_MODE;
+ track->db_depth_info &= C_028010_ARRAY_MODE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ }
+ } else {
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ }
+ track->db_dirty = true;
+ break;
+ case R_028004_DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
+ case R_028000_DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ track->db_dirty = true;
+ break;
+ case R_028AB0_VGT_STRMOUT_EN:
+ track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
+ break;
+ case R_028B20_VGT_STRMOUT_BUFFER_EN:
+ track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_BASE_0:
+ case VGT_STRMOUT_BUFFER_BASE_1:
+ case VGT_STRMOUT_BUFFER_BASE_2:
+ case VGT_STRMOUT_BUFFER_BASE_3:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
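+ /* the four BUFFER_BASE registers are spaced 16 bytes apart, so
+ * dividing by 16 yields the stream-out buffer index
+ */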
+ tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+ track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->vgt_strmout_bo[tmp] = reloc->robj;
+ track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->streamout_dirty = true;
+ break;
+ case VGT_STRMOUT_BUFFER_SIZE_0:
+ case VGT_STRMOUT_BUFFER_SIZE_1:
+ case VGT_STRMOUT_BUFFER_SIZE_2:
+ case VGT_STRMOUT_BUFFER_SIZE_3:
+ tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+ /* size in register is DWs, convert to bytes */
+ track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+ track->streamout_dirty = true;
+ break;
+ case CP_COHER_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case R_028238_CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case R_02823C_CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_028C04_PA_SC_AA_CONFIG:
+ tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ track->log_nsamples = tmp;
+ track->nsamples = 1 << tmp;
+ track->cb_dirty = true;
+ break;
+ case R_028808_CB_COLOR_CONTROL:
+ tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+ track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+ track->cb_dirty = true;
+ break;
+ case R_0280A0_CB_COLOR0_INFO:
+ case R_0280A4_CB_COLOR1_INFO:
+ case R_0280A8_CB_COLOR2_INFO:
+ case R_0280AC_CB_COLOR3_INFO:
+ case R_0280B0_CB_COLOR4_INFO:
+ case R_0280B4_CB_COLOR5_INFO:
+ case R_0280B8_CB_COLOR6_INFO:
+ case R_0280BC_CB_COLOR7_INFO:
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ }
+ } else {
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ }
+ track->cb_dirty = true;
+ break;
+ case R_028080_CB_COLOR0_VIEW:
+ case R_028084_CB_COLOR1_VIEW:
+ case R_028088_CB_COLOR2_VIEW:
+ case R_02808C_CB_COLOR3_VIEW:
+ case R_028090_CB_COLOR4_VIEW:
+ case R_028094_CB_COLOR5_VIEW:
+ case R_028098_CB_COLOR6_VIEW:
+ case R_02809C_CB_COLOR7_VIEW:
+ tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
+ track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_dirty = true;
+ break;
+ case R_028060_CB_COLOR0_SIZE:
+ case R_028064_CB_COLOR1_SIZE:
+ case R_028068_CB_COLOR2_SIZE:
+ case R_02806C_CB_COLOR3_SIZE:
+ case R_028070_CB_COLOR4_SIZE:
+ case R_028074_CB_COLOR5_SIZE:
+ case R_028078_CB_COLOR6_SIZE:
+ case R_02807C_CB_COLOR7_SIZE:
+ tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+ track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_size_idx[tmp] = idx;
+ track->cb_dirty = true;
+ break;
+ /* These registers were added late; there is userspace
+ * which does provide a relocation for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the
+ * last CB_COLOR0_BASE. Note that if userspace doesn't
+ * set CB_COLOR0_BASE before these registers we will
+ * report an error. Old userspace always set
+ * CB_COLOR0_BASE before any of these.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+ track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+ ib[idx] = track->cb_color_base_last[tmp];
+ } else {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->cb_color_frag_bo[tmp] = reloc->robj;
+ track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
+ }
+ break;
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+ track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+ ib[idx] = track->cb_color_base_last[tmp];
+ } else {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->cb_color_tile_bo[tmp] = reloc->robj;
+ track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
+ }
+ break;
+ case R_028100_CB_COLOR0_MASK:
+ case R_028104_CB_COLOR1_MASK:
+ case R_028108_CB_COLOR2_MASK:
+ case R_02810C_CB_COLOR3_MASK:
+ case R_028110_CB_COLOR4_MASK:
+ case R_028114_CB_COLOR5_MASK:
+ case R_028118_CB_COLOR6_MASK:
+ case R_02811C_CB_COLOR7_MASK:
+ tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+ track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
+ }
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+ track->cb_dirty = true;
+ break;
+ case DB_DEPTH_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_offset = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_bo = reloc->robj;
+ track->db_bo_mc = reloc->lobj.gpu_offset;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_DATA_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ /* force 8x8 htile width and height */
+ ib[idx] |= 3;
+ track->db_dirty = true;
+ break;
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case SX_MEMORY_EXPORT_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case SX_MISC:
+ track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
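+/* Size of one mip level: the base size shifted right by the level,
+ * clamped to at least one texel and rounded up to a power of two for
+ * the non-base levels.
+ */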
+unsigned r600_mip_minify(unsigned size, unsigned level)
+{
+ unsigned val;
+
+ val = max(1U, size >> level);
+ if (level > 0)
+ val = roundup_pow_of_two(val);
+ return val;
+}
+
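+/* Compute the byte size of the base level (l0_size) and of the whole
+ * mip chain (mipmap_size) by walking levels blevel..llevel and rounding
+ * each level's block counts up to the hardware alignment.
+ */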
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
+ unsigned block_align, unsigned height_align, unsigned base_align,
+ unsigned *l0_size, unsigned *mipmap_size)
+{
+ unsigned offset, i, level;
+ unsigned width, height, depth, size;
+ unsigned blocksize;
+ unsigned nbx, nby;
+ unsigned nlevels = llevel - blevel + 1;
+
+ *l0_size = -1;
+ blocksize = r600_fmt_get_blocksize(format);
+
+ w0 = r600_mip_minify(w0, 0);
+ h0 = r600_mip_minify(h0, 0);
+ d0 = r600_mip_minify(d0, 0);
+ for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ width = r600_mip_minify(w0, i);
+ nbx = r600_fmt_get_nblocksx(format, width);
+
+ nbx = round_up(nbx, block_align);
+
+ height = r600_mip_minify(h0, i);
+ nby = r600_fmt_get_nblocksy(format, height);
+ nby = round_up(nby, height_align);
+
+ depth = r600_mip_minify(d0, i);
+
+ size = nbx * nby * blocksize * nsamples;
+ if (nfaces)
+ size *= nfaces;
+ else
+ size *= depth;
+
+ if (i == 0)
+ *l0_size = size;
+
+ if (i == 0 || i == 1)
+ offset = round_up(offset, base_align);
+
+ offset += size;
+ }
+ *mipmap_size = offset;
+ if (llevel == 0)
+ *mipmap_size = *l0_size;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
+}
+
+/**
+ * r600_check_texture_resource() - check if texture resource is valid
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo object are big enough to cover this resource.
+ */
+static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap,
+ u64 base_offset,
+ u64 mip_offset,
+ u32 tiling_flags)
+{
+ struct r600_cs_track *track = p->track;
+ u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
+ u32 height_align, pitch, pitch_align, depth_align;
+ u32 barray, larray;
+ u64 base_align;
+ struct array_mode_checker array_check;
+ u32 format;
+ bool is_array;
+
+ /* on legacy kernels we don't perform advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+
+ /* convert to bytes */
+ base_offset <<= 8;
+ mip_offset <<= 8;
+
+ word0 = radeon_get_ib_value(p, idx + 0);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
+ word1 = radeon_get_ib_value(p, idx + 1);
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ word4 = radeon_get_ib_value(p, idx + 4);
+ word5 = radeon_get_ib_value(p, idx + 5);
+ dim = G_038000_DIM(word0);
+ w0 = G_038000_TEX_WIDTH(word0) + 1;
+ pitch = (G_038000_PITCH(word0) + 1) * 8;
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ format = G_038004_DATA_FORMAT(word1);
+ blevel = G_038010_BASE_LEVEL(word4);
+ llevel = G_038014_LAST_LEVEL(word5);
+ /* pitch in texels */
+ array_check.array_mode = G_038000_TILE_MODE(word0);
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = 1;
+ array_check.blocksize = r600_fmt_get_blocksize(format);
+ nfaces = 1;
+ is_array = false;
+ switch (dim) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
+ case V_038000_SQ_TEX_DIM_3D:
+ break;
+ case V_038000_SQ_TEX_DIM_CUBEMAP:
+ if (p->family >= CHIP_RV770)
+ nfaces = 8;
+ else
+ nfaces = 6;
+ break;
+ case V_038000_SQ_TEX_DIM_1D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ is_array = true;
+ break;
+ case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ is_array = true;
+ /* fall through */
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ array_check.nsamples = 1 << llevel;
+ llevel = 0;
+ break;
+ default:
+ dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ return -EINVAL;
+ }
+ if (!r600_fmt_is_valid_texture(format, p->family)) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, format);
+ return -EINVAL;
+ }
+
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+
+ /* XXX check height as well... */
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(mip_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+
+ if (blevel > llevel) {
+ dev_warn(p->dev, "texture blevel %d > llevel %d\n",
+ blevel, llevel);
+ }
+ if (is_array) {
+ barray = G_038014_BASE_ARRAY(word5);
+ larray = G_038014_LAST_ARRAY(word5);
+
+ nfaces = larray - barray + 1;
+ }
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
+ pitch_align, height_align, base_align,
+ &l0_size, &mipmap_size);
+ /* the value from the IB gives us the offset into the texture bo */
+ if ((l0_size + word2) > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+ w0, h0, pitch_align, height_align,
+ array_check.array_mode, format, word2,
+ l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+ return -EINVAL;
+ }
+ /* the value from the IB gives us the offset into the mipmap bo */
+ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
+ /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
+ }
+ return 0;
+}
+
+static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ u32 m, i;
+
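+ /* same r600_reg_safe_bm lookup as r600_cs_check_reg(), but any
+ * register flagged for special handling is simply rejected here
+ */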
+ i = (reg >> 7);
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return true;
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return false;
+}
+
+static int r600_packet3_check(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ struct radeon_cs_reloc *reloc;
+ struct r600_cs_track *track;
+ volatile u32 *ib;
+ unsigned idx;
+ unsigned i;
+ unsigned start_reg, end_reg, reg;
+ int r;
+ u32 idx_value;
+
+ track = (struct r600_cs_track *)p->track;
+ ib = p->ib.ptr;
+ idx = pkt->idx + 1;
+ idx_value = radeon_get_ib_value(p, idx);
+
+ switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ uint64_t offset;
+
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
+
+ ib[idx + 0] = offset;
+ ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ break;
+
+ case PACKET3_START_3D_CMDBUF:
+ if (p->family >= CHIP_RV770 || pkt->count) {
+ DRM_ERROR("bad START_3D\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CONTEXT_CONTROL:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad CONTEXT_CONTROL\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_NUM_INSTANCES:
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_DRAW_INDEX:
+ {
+ uint64_t offset;
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad DRAW_INDEX\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ }
+ case PACKET3_DRAW_INDEX_AUTO:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ return -EINVAL;
+ }
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DRAW_INDEX_IMMD_BE:
+ case PACKET3_DRAW_INDEX_IMMD:
+ if (pkt->count < 2) {
+ DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ return -EINVAL;
+ }
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
+ case PACKET3_WAIT_REG_MEM:
+ if (pkt->count != 5) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+ /* bit 4 is reg (0) or mem (1) */
+ if (idx_value & 0x10) {
+ uint64_t offset;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad WAIT_REG_MEM\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CP_DMA:
+ {
+ u32 command, size;
+ u64 offset, tmp;
+ if (pkt->count != 4) {
+ DRM_ERROR("bad CP DMA\n");
+ return -EINVAL;
+ }
+ command = radeon_get_ib_value(p, idx+4);
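+ /* bits 20:0 of COMMAND hold the transfer size in bytes; it is
+ * checked against the src/dst bo sizes below
+ */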
+ size = command & 0x1fffff;
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ DRM_ERROR("CP DMA SAS not supported\n");
+ return -EINVAL;
+ } else {
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ /* src address space is memory */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad CP DMA SRC\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx) +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx] = offset;
+ ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ DRM_ERROR("CP DMA DAS not supported\n");
+ return -EINVAL;
+ } else {
+ /* dst address space is memory */
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad CP DMA DST\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx+2) +
+ ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+ offset = reloc->lobj.gpu_offset + tmp;
+
+ if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+ dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+
+ ib[idx+2] = offset;
+ ib[idx+3] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ }
+ case PACKET3_SURFACE_SYNC:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ /* 0xffffffff/0x0 is flush all cache flag */
+ if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+ radeon_get_ib_value(p, idx + 2) != 0) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad SURFACE_SYNC\n");
+ return -EINVAL;
+ }
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_EVENT_WRITE:
+ if (pkt->count != 2 && pkt->count != 0) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ uint64_t offset;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffff8;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_EVENT_WRITE_EOP:
+ {
+ uint64_t offset;
+
+ if (pkt->count != 4) {
+ DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad EVENT_WRITE\n");
+ return -EINVAL;
+ }
+
+ offset = reloc->lobj.gpu_offset +
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+ ib[idx+1] = offset & 0xfffffffc;
+ ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ break;
+ }
+ case PACKET3_SET_CONFIG_REG:
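+ /* the packet supplies a DW offset from the config reg base;
+ * convert it to a byte register address and range check the
+ * whole run before checking the registers one by one
+ */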
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_CONTEXT_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
+ (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+ (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
+ }
+ break;
+ case PACKET3_SET_RESOURCE:
+ if (pkt->count % 7) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
+ (start_reg >= PACKET3_SET_RESOURCE_END) ||
+ (end_reg >= PACKET3_SET_RESOURCE_END)) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < (pkt->count / 7); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset, base_offset, mip_offset;
+
+ switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
+ case SQ_TEX_VTX_VALID_TEXTURE:
+ /* tex base */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
+ texture = reloc->robj;
+ /* tex mip base */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = r600_check_texture_resource(p, idx+(i*7)+1,
+ texture, mipmap,
+ base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+ mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+ reloc->lobj.tiling_flags);
+ if (r)
+ return r;
+ ib[idx+1+(i*7)+2] += base_offset;
+ ib[idx+1+(i*7)+3] += mip_offset;
+ break;
+ case SQ_TEX_VTX_VALID_BUFFER:
+ {
+ uint64_t offset64;
+ /* vtx base */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+ }
+
+ offset64 = reloc->lobj.gpu_offset + offset;
+ ib[idx+1+(i*7)+0] = offset64;
+ ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
+ (upper_32_bits(offset64) & 0xff);
+ break;
+ }
+ case SQ_TEX_VTX_INVALID_TEXTURE:
+ case SQ_TEX_VTX_INVALID_BUFFER:
+ default:
+ DRM_ERROR("bad SET_RESOURCE\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_ALU_CONST:
+ if (track->sq_config & DX9_CONSTS) {
+ start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+ (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+ DRM_ERROR("bad SET_ALU_CONST\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case PACKET3_SET_BOOL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+ (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+ DRM_ERROR("bad SET_BOOL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_LOOP_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+ (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+ DRM_ERROR("bad SET_LOOP_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CTL_CONST:
+ start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+ (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+ DRM_ERROR("bad SET_CTL_CONST\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_SAMPLER:
+ if (pkt->count % 3) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
+ (start_reg >= PACKET3_SET_SAMPLER_END) ||
+ (end_reg >= PACKET3_SET_SAMPLER_END)) {
+ DRM_ERROR("bad SET_SAMPLER\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_STRMOUT_BASE_UPDATE:
+ /* RS780 and RS880 also need this */
+ if (p->family < CHIP_RS780) {
+ DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ return -EINVAL;
+ }
+ if (pkt->count != 1) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ return -EINVAL;
+ }
+ if (idx_value > 3) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ return -EINVAL;
+ }
+ {
+ u64 offset;
+
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ return -EINVAL;
+ }
+
+ if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ return -EINVAL;
+ }
+
+ offset = radeon_get_ib_value(p, idx+1) << 8;
+ if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
+ return -EINVAL;
+ }
+
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
+ case PACKET3_SURFACE_BASE_UPDATE:
+ if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
+ DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ return -EINVAL;
+ }
+ /* Updating memory at DST_ADDRESS. */
+ if (idx_value & 0x1) {
+ u64 offset;
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ }
+ /* Reading data from SRC_ADDRESS. */
+ if (((idx_value >> 1) & 0x3) == 2) {
+ u64 offset;
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ }
+ break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
+ case PACKET3_COPY_DW:
+ if (pkt->count != 4) {
+ DRM_ERROR("bad COPY_DW (invalid count)\n");
+ return -EINVAL;
+ }
+ if (idx_value & 0x1) {
+ u64 offset;
+ /* SRC is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+1);
+ offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+1] = offset;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* SRC is a reg. */
+ reg = radeon_get_ib_value(p, idx+1) << 2;
+ if (!r600_is_safe_reg(p, reg, idx+1))
+ return -EINVAL;
+ }
+ if (idx_value & 0x2) {
+ u64 offset;
+ /* DST is memory. */
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ if (r) {
+ DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+3);
+ offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+3] = offset;
+ ib[idx+4] = upper_32_bits(offset) & 0xff;
+ } else {
+ /* DST is a reg. */
+ reg = radeon_get_ib_value(p, idx+3) << 2;
+ if (!r600_is_safe_reg(p, reg, idx+3))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_NOP:
+ break;
+ default:
+ DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int r600_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ struct r600_cs_track *track;
+ int r;
+
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ if (p->rdev->family < CHIP_RV770) {
+ track->npipes = p->rdev->config.r600.tiling_npipes;
+ track->nbanks = p->rdev->config.r600.tiling_nbanks;
+ track->group_size = p->rdev->config.r600.tiling_group_size;
+ } else if (p->rdev->family <= CHIP_RV740) {
+ track->npipes = p->rdev->config.rv770.tiling_npipes;
+ track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+ track->group_size = p->rdev->config.rv770.tiling_group_size;
+ }
+ p->track = track;
+ }
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ p->idx += pkt.count + 2;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = r600_cs_parse_packet0(p, &pkt);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r600_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
+ return -EINVAL;
+ }
+ if (r) {
+ kfree(p->track);
+ p->track = NULL;
+ return r;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ kfree(p->track);
+ p->track = NULL;
+ return 0;
+}
+
+#ifdef CPTCFG_DRM_RADEON_UMS
+
+/**
+ * r600_cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set then unvalidate the buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+ unsigned i;
+
+ kfree(parser->relocs);
+ for (i = 0; i < parser->nchunks; i++) {
+ kfree(parser->chunks[i].kdata);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
+ }
+ kfree(parser->chunks);
+ kfree(parser->chunks_array);
+}
+
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+ unsigned family, u32 *ib, int *l)
+{
+ struct radeon_cs_parser parser;
+ struct radeon_cs_chunk *ib_chunk;
+ struct r600_cs_track *track;
+ int r;
+
+ /* initialize tracker */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
+ /* initialize parser */
+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ parser.filp = filp;
+ parser.dev = &dev->pdev->dev;
+ parser.rdev = NULL;
+ parser.family = family;
+ parser.track = track;
+ parser.ib.ptr = ib;
+ r = radeon_cs_parser_init(&parser, data);
+ if (r) {
+ DRM_ERROR("Failed to initialize parser !\n");
+ r600_cs_parser_fini(&parser, r);
+ return r;
+ }
+ r = r600_cs_parser_relocs_legacy(&parser);
+ if (r) {
+ DRM_ERROR("Failed to parse relocation !\n");
+ r600_cs_parser_fini(&parser, r);
+ return r;
+ }
+ /* Copy the packet into the IB, the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached). */
+ ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ parser.ib.length_dw = ib_chunk->length_dw;
+ *l = parser.ib.length_dw;
+ r = r600_cs_parse(&parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ r600_cs_parser_fini(&parser, r);
+ return r;
+ }
+ r = radeon_cs_finish_pages(&parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ r600_cs_parser_fini(&parser, r);
+ return r;
+ }
+ r600_cs_parser_fini(&parser, r);
+ return r;
+}
+
+void r600_cs_legacy_init(void)
+{
+ r600_nomm = 1;
+}
+
+#endif
+
+/*
+ * DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ unsigned idx;
+
+ *cs_reloc = NULL;
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ idx = p->dma_reloc_idx;
+ if (idx >= p->nrelocs) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs_ptr[idx];
+ p->dma_reloc_idx++;
+ return 0;
+}
+
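+/* a DMA packet header encodes the command in bits 31:28, the tiled
+ * flag in bit 23 and the DW count in bits 15:0
+ */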
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p: parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ u32 header, cmd, count, tiled;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
+ u64 src_offset, dst_offset;
+ int r;
+
+ do {
+ if (p->idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ idx = p->idx;
+ header = radeon_get_ib_value(p, idx);
+ cmd = GET_DMA_CMD(header);
+ count = GET_DMA_COUNT(header);
+ tiled = GET_DMA_T(header);
+
+ switch (cmd) {
+ case DMA_PACKET_WRITE:
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += count + 5;
+ } else {
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += count + 3;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_COPY:
+ r = r600_dma_cs_next_reloc(p, &src_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+5);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+5);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 7;
+ } else {
+ if (p->family >= CHIP_RV770) {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ } else {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ p->idx += 4;
+ }
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ break;
+ case DMA_PACKET_CONSTANT_FILL:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("Constant Fill is 7xx only !\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst_reloc);
+ if (r) {
+ DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+ p->idx += 4;
+ break;
+ case DMA_PACKET_NOP:
+ p->idx += 1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
+ mdelay(1);
+ }
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 0000000..0efe2a9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+ RGB = 0,
+ YCC_422 = 1,
+ YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+ AUDIO_STATUS_DIG_ENABLE = 0x01,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_EMPHASIS = 0x08,
+ AUDIO_STATUS_COPYRIGHT = 0x10,
+ AUDIO_STATUS_NONAUDIO = 0x20,
+ AUDIO_STATUS_PROFESSIONAL = 0x40,
+ AUDIO_STATUS_LEVEL = 0x80
+};
+
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+};
+
+/*
+ * calculate CTS value if it's not found in the table
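+ *
+ * HDMI ACR defines the audio rate as fs = f_TMDS * N / (128 * CTS),
+ * so CTS = f_TMDS * N / (128 * fs); clock is in kHz, hence the final
+ * multiplication by 1000.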
+ */
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
+{
+ if (*CTS == 0)
+ *CTS = clock * N / (128 * freq) * 1000;
+ DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+ N, *CTS, freq);
+}
+
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+ struct radeon_hdmi_acr res;
+ u8 i;
+
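+ /* walk the table until the clock matches or we hit the catch-all
+ * terminating entry (clock == 0)
+ */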
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
+
+ /* In case some CTS are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+ return res;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * build an HDMI Video Info Frame
+ */
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
+ uint8_t *header = buffer;
+
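+ /* the payload starts after the 3-byte infoframe header (type,
+ * version, length); the version byte from the header ends up in
+ * the top byte of AVI_INFO3
+ */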
+ WREG32(HDMI0_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(HDMI0_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(HDMI0_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(HDMI0_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
+/*
+ * build an Audio Info Frame
+ */
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+ const void *buffer, size_t size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ const u8 *frame = buffer + 3;
+
+ WREG32(HDMI0_AUDIO_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(HDMI0_AUDIO_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
+}
+
+/*
+ * have buffer status changed since last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ int status, result;
+
+ if (!dig->afmt || !dig->afmt->enabled)
+ return 0;
+
+ status = r600_hdmi_is_audio_buffer_filled(encoder);
+ result = dig->afmt->last_buffer_filled_status != status;
+ dig->afmt->last_buffer_filled_status = status;
+
+ return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ bool hdmi_audio_workaround = false; /* FIXME */
+ u32 value;
+
+ if (!hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder))
+ value = 0; /* disable workaround */
+ else
+ value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value, ~HDMI0_AUDIO_TEST_EN);
+}
+
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 base_rate = 24000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT;
+ * it doesn't matter which one you use, so just use the first one
+ */
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational number
+ * (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the numerator,
+ * DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
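+ /* For example (illustrative clock value): with a 148500 kHz pixel
+ * clock, PHASE = 24000 * 100 = 2400000 and MODULE = 148500 * 100 =
+ * 14850000, so PHASE/MODULE = 24 MHz / 148.5 MHz and the DTO divides
+ * the pixel clock back down to a fixed 24 MHz audio reference.
+ */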
+ if (ASIC_IS_DCE32(rdev)) {
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* according to the reg specs, this should be DCE3.2 only, but in
+ * practice it seems to cover DCE3.0/3.1 as well.
+ */
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
+ } else {
+ /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
+ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+ AUDIO_DTO_MODULE(clock / 10));
+ }
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
+ uint32_t offset;
+ ssize_t err;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_dto(encoder, mode->clock);
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
+
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
+
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they are quite useful for debugging */
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct r600_audio audio = r600_audio_status(rdev);
+ uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+ struct hdmi_audio_infoframe frame;
+ uint32_t offset;
+ uint32_t iec;
+ ssize_t err;
+
+ if (!dig->afmt || !dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+ r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+ audio.channels, audio.rate, audio.bits_per_sample);
+ DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+ (int)audio.status_bits, (int)audio.category_code);
+
+ iec = 0;
+ if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
+ iec |= 1 << 0;
+ if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
+ iec |= 1 << 1;
+ if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
+ iec |= 1 << 2;
+ if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
+ iec |= 1 << 3;
+
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
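+ /* map the sample rate to its IEC 60958 channel-status sampling-frequency code */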
+ switch (audio.rate) {
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
+ }
+
+ WREG32(HDMI0_60958_0 + offset, iec);
+
+ iec = 0;
+ switch (audio.bits_per_sample) {
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
+ }
+ if (audio.status_bits & AUDIO_STATUS_V)
+ iec |= 0x5 << 16;
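+ /* 0x5000f covers the word-length field (bits 0-3) and the
+  * CS_VALID_L/CS_VALID_R bits (16 and 18); WREG32_P updates only those
+  * bits from iec and preserves the rest of the register. */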
+ WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ DRM_ERROR("failed to setup audio infoframe\n");
+ return;
+ }
+
+ frame.channels = audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack audio infoframe\n");
+ return;
+ }
+
+ r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
+ r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * enable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 hdmi = HDMI0_ERROR_ACK;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ /* Older chipsets require enabling the HDMI block and stream routing manually */
+ if (!ASIC_IS_DCE3(rdev)) {
+ if (enable)
+ hdmi |= HDMI0_ENABLE;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ if (enable) {
+ WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+ } else {
+ WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ if (enable) {
+ WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ } else {
+ WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ if (enable) {
+ WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ } else {
+ WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (enable)
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+ break;
+ default:
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
+ break;
+ }
+ WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
+ }
+
+ if (rdev->irq.installed) {
+ /* if irq is available use it */
+ /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
+ if (enable)
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+ else
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+ }
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
+
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 0000000..909219b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#define R600_PCIE_PORT_INDEX 0x0038
+#define R600_PCIE_PORT_DATA 0x003c
+
+#define R600_MC_VM_FB_LOCATION 0x2180
+#define R600_MC_FB_BASE_MASK 0x0000FFFF
+#define R600_MC_FB_BASE_SHIFT 0
+#define R600_MC_FB_TOP_MASK 0xFFFF0000
+#define R600_MC_FB_TOP_SHIFT 16
+#define R600_MC_VM_AGP_TOP 0x2184
+#define R600_MC_AGP_TOP_MASK 0x0003FFFF
+#define R600_MC_AGP_TOP_SHIFT 0
+#define R600_MC_VM_AGP_BOT 0x2188
+#define R600_MC_AGP_BOT_MASK 0x0003FFFF
+#define R600_MC_AGP_BOT_SHIFT 0
+#define R600_MC_VM_AGP_BASE 0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
+#define R600_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
+#define R600_LOGICAL_PAGE_NUMBER_SHIFT 0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
+
+#define R700_MC_VM_FB_LOCATION 0x2024
+#define R700_MC_FB_BASE_MASK 0x0000FFFF
+#define R700_MC_FB_BASE_SHIFT 0
+#define R700_MC_FB_TOP_MASK 0xFFFF0000
+#define R700_MC_FB_TOP_SHIFT 16
+#define R700_MC_VM_AGP_TOP 0x2028
+#define R700_MC_AGP_TOP_MASK 0x0003FFFF
+#define R700_MC_AGP_TOP_SHIFT 0
+#define R700_MC_VM_AGP_BOT 0x202c
+#define R700_MC_AGP_BOT_MASK 0x0003FFFF
+#define R700_MC_AGP_BOT_SHIFT 0
+#define R700_MC_VM_AGP_BASE 0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define R700_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
+#define R700_LOGICAL_PAGE_NUMBER_SHIFT 0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
+
+#define R600_RAMCFG 0x2408
+# define R600_CHANSIZE (1 << 7)
+# define R600_CHANSIZE_OVERRIDE (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT 0x618
+# define R600_OPEN_DRAIN_PADS (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE 0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
+
+#define R600_D1GRPH_SWAP_CONTROL 0x610C
+# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
+# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
+
+#define R600_HDP_NONSURFACE_BASE 0x2c04
+
+#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
+#define R600_CONFIG_CNTL 0x5424
+#define R600_CONFIG_MEMSIZE 0x5428
+#define R600_CONFIG_F0_BASE 0x542C
+#define R600_CONFIG_APER_SIZE 0x5430
+
+#define R600_BIF_FB_EN 0x5490
+#define R600_FB_READ_EN (1 << 0)
+#define R600_FB_WRITE_EN (1 << 1)
+
+#define R600_CITF_CNTL 0x200c
+#define R600_BLACKOUT_MASK 0x00000003
+
+#define R700_MC_CITF_CNTL 0x25c0
+
+#define R600_ROM_CNTL 0x1600
+# define R600_SCK_OVERWRITE (1 << 1)
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL 0x600
+# define R600_SPLL_BYPASS_EN (1 << 3)
+#define R600_CG_SPLL_STATUS 0x60c
+# define R600_SPLL_CHG_STATUS (1 << 1)
+
+#define R600_BIOS_0_SCRATCH 0x1724
+#define R600_BIOS_1_SCRATCH 0x1728
+#define R600_BIOS_2_SCRATCH 0x172c
+#define R600_BIOS_3_SCRATCH 0x1730
+#define R600_BIOS_4_SCRATCH 0x1734
+#define R600_BIOS_5_SCRATCH 0x1738
+#define R600_BIOS_6_SCRATCH 0x173c
+#define R600_BIOS_7_SCRATCH 0x1740
+
+/* Audio, these regs were reverse engineered,
+ * so there is a good chance the naming is wrong.
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL 0x0514
+#define R600_AUDIO_PLL1_DIV 0x0518
+#define R600_AUDIO_PLL2_MUL 0x0524
+#define R600_AUDIO_PLL2_DIV 0x0528
+#define R600_AUDIO_CLK_SRCSEL 0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE 0x7300
+#define R600_AUDIO_TIMING 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID 0x7380
+#define R600_AUDIO_REVISION_ID 0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
+#define R600_AUDIO_NID1_NODE_COUNT 0x738c
+#define R600_AUDIO_NID1_TYPE 0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
+#define R600_AUDIO_SUPPORTED_CODEC 0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS 0x73a0
+#define R600_AUDIO_NID3_CAPS 0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN 0x73ac
+#define R600_AUDIO_CONN_LIST 0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
+#define R600_AUDIO_PLAYING 0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
+#define R600_AUDIO_PIN_SENSE 0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
+#define R600_AUDIO_STATUS_BITS 0x73d8
+
+#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
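+/* These per-instance offsets are added to the HDMI0_* register
+ * addresses, e.g. 0x7400 + DCE3_HDMI_OFFSET1 = 0x7800, the DCE3.2
+ * second instance. */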
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
new file mode 100644
index 0000000..2fd2241
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -0,0 +1,2002 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef R600D_H
+#define R600D_H
+
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define R6XX_MAX_SH_GPRS 256
+#define R6XX_MAX_TEMP_GPRS 16
+#define R6XX_MAX_SH_THREADS 256
+#define R6XX_MAX_SH_STACK_ENTRIES 4096
+#define R6XX_MAX_BACKENDS 8
+#define R6XX_MAX_BACKENDS_MASK 0xff
+#define R6XX_MAX_SIMDS 8
+#define R6XX_MAX_SIMDS_MASK 0xff
+#define R6XX_MAX_PIPES 8
+#define R6XX_MAX_PIPES_MASK 0xff
+
+/* PTE flags */
+#define PTE_VALID (1 << 0)
+#define PTE_SYSTEM (1 << 1)
+#define PTE_SNOOPED (1 << 2)
+#define PTE_READABLE (1 << 5)
+#define PTE_WRITEABLE (1 << 6)
+
+/* tiling bits */
+#define ARRAY_LINEAR_GENERAL 0x00000000
+#define ARRAY_LINEAR_ALIGNED 0x00000001
+#define ARRAY_1D_TILED_THIN1 0x00000002
+#define ARRAY_2D_TILED_THIN1 0x00000004
+
+/* Registers */
+#define ARB_POP 0x2418
+#define ENABLE_TC128 (1 << 30)
+#define ARB_GDEC_RD_CNTL 0x246C
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+
+#define R_028808_CB_COLOR_CONTROL 0x28808
+#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
+#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
+#define C_028808_SPECIAL_OP 0xFFFFFF8F
+#define V_028808_SPECIAL_NORMAL 0x00
+#define V_028808_SPECIAL_DISABLE 0x01
+#define V_028808_SPECIAL_RESOLVE_BOX 0x07
+
+#define CB_COLOR0_BASE 0x28040
+#define CB_COLOR1_BASE 0x28044
+#define CB_COLOR2_BASE 0x28048
+#define CB_COLOR3_BASE 0x2804C
+#define CB_COLOR4_BASE 0x28050
+#define CB_COLOR5_BASE 0x28054
+#define CB_COLOR6_BASE 0x28058
+#define CB_COLOR7_BASE 0x2805C
+#define CB_COLOR7_FRAG 0x280FC
+
+#define CB_COLOR0_SIZE 0x28060
+#define CB_COLOR0_VIEW 0x28080
+#define R_028080_CB_COLOR0_VIEW 0x028080
+#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028080_SLICE_START 0xFFFFF800
+#define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028080_SLICE_MAX 0xFF001FFF
+#define R_028084_CB_COLOR1_VIEW 0x028084
+#define R_028088_CB_COLOR2_VIEW 0x028088
+#define R_02808C_CB_COLOR3_VIEW 0x02808C
+#define R_028090_CB_COLOR4_VIEW 0x028090
+#define R_028094_CB_COLOR5_VIEW 0x028094
+#define R_028098_CB_COLOR6_VIEW 0x028098
+#define R_02809C_CB_COLOR7_VIEW 0x02809C
+#define R_028100_CB_COLOR0_MASK 0x028100
+#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
+#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
+#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
+#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
+#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
+#define C_028100_FMASK_TILE_MAX 0x00000FFF
+#define R_028104_CB_COLOR1_MASK 0x028104
+#define R_028108_CB_COLOR2_MASK 0x028108
+#define R_02810C_CB_COLOR3_MASK 0x02810C
+#define R_028110_CB_COLOR4_MASK 0x028110
+#define R_028114_CB_COLOR5_MASK 0x028114
+#define R_028118_CB_COLOR6_MASK 0x028118
+#define R_02811C_CB_COLOR7_MASK 0x02811C
+#define CB_COLOR0_INFO 0x280a0
+# define CB_FORMAT(x) ((x) << 2)
+# define CB_ARRAY_MODE(x) ((x) << 8)
+# define CB_SOURCE_FORMAT(x) ((x) << 27)
+# define CB_SF_EXPORT_FULL 0
+# define CB_SF_EXPORT_NORM 1
+#define CB_COLOR0_TILE 0x280c0
+#define CB_COLOR0_FRAG 0x280e0
+#define CB_COLOR0_MASK 0x28100
+
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+
+#define CONFIG_MEMSIZE 0x5428
+#define CONFIG_CNTL 0x5424
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
+#define CP_COHER_BASE 0x85F8
+#define CP_DEBUG 0xC1FC
+#define R_0086D8_CP_ME_CNTL 0x86D8
+#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
+#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
+#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
+#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ_END(x) ((x) << 16)
+#define ROQ_END(x) ((x) << 24)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define RB_RPTR_SWAP(x) ((x) << 0)
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_ROQ_IB1_STAT 0x8784
+#define CP_ROQ_IB2_STAT 0x8788
+#define CP_SEM_WAIT_TIMER 0x85BC
+
+#define DB_DEBUG 0x9830
+#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
+#define DB_DEPTH_BASE 0x2800C
+#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28D24
+#define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028D24_HTILE_WIDTH 0xFFFFFFFE
+#define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028D24_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028D24_LINEAR(x) (((x) >> 2) & 0x1)
+#define DB_WATERMARKS 0x9838
+#define DEPTH_FREE(x) ((x) << 0)
+#define DEPTH_FLUSH(x) ((x) << 5)
+#define DEPTH_PENDING_FREE(x) ((x) << 15)
+#define DEPTH_CACHELINE_FREE(x) ((x) << 20)
+
+#define DCP_TILING_CONFIG 0x6CA0
+#define PIPE_TILING(x) ((x) << 1)
+#define BANK_TILING(x) ((x) << 4)
+#define GROUP_SIZE(x) ((x) << 6)
+#define ROW_TILING(x) ((x) << 8)
+#define BANK_SWAPS(x) ((x) << 11)
+#define SAMPLE_SPLIT(x) ((x) << 14)
+#define BACKEND_MAP(x) ((x) << 16)
+
+#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define SQ_CONFIG 0x8c00
+# define VC_ENABLE (1 << 0)
+# define EXPORT_SRC_C (1 << 1)
+# define DX9_CONSTS (1 << 2)
+# define ALU_INST_PREFER_VECTOR (1 << 3)
+# define DX10_CLAMP (1 << 4)
+# define CLAUSE_SEQ_PRIO(x) ((x) << 8)
+# define PS_PRIO(x) ((x) << 24)
+# define VS_PRIO(x) ((x) << 26)
+# define GS_PRIO(x) ((x) << 28)
+# define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8c04
+# define NUM_PS_GPRS(x) ((x) << 0)
+# define NUM_VS_GPRS(x) ((x) << 16)
+# define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8c08
+# define NUM_GS_GPRS(x) ((x) << 0)
+# define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8c0c
+# define NUM_PS_THREADS(x) ((x) << 0)
+# define NUM_VS_THREADS(x) ((x) << 8)
+# define NUM_GS_THREADS(x) ((x) << 16)
+# define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8c10
+# define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+# define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
+# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_ESGS_RING_BASE 0x8c40
+#define SQ_GSVS_RING_BASE 0x8c48
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_FBUF_RING_BASE 0x8c70
+#define SQ_REDUC_RING_BASE 0x8c78
+
+#define GRBM_CNTL 0x8000
+# define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000001F
+#define GUI_ACTIVE (1<<31)
+#define GRBM_STATUS2 0x8014
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1<<0)
+
+#define CG_THERMAL_STATUS 0x7F4
+#define ASIC_T(x) ((x) << 0)
+#define ASIC_T_MASK 0x1FF
+#define ASIC_T_SHIFT 0
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
+
+#define MC_VM_AGP_TOP 0x2184
+#define MC_VM_AGP_BOT 0x2188
+#define MC_VM_AGP_BASE 0x218C
+#define MC_VM_FB_LOCATION 0x2180
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L1_STRICT_ORDERING (1 << 2)
+#define SYSTEM_ACCESS_MODE_MASK 0x000000C0
+#define SYSTEM_ACCESS_MODE_SHIFT 6
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 6)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
+#define ENABLE_SEMAPHORE_MODE (1 << 10)
+#define ENABLE_WAIT_L2_QUERY (1 << 11)
+#define EFFECTIVE_L1_TLB_SIZE(x) (((x) & 7) << 12)
+#define EFFECTIVE_L1_TLB_SIZE_MASK 0x00007000
+#define EFFECTIVE_L1_TLB_SIZE_SHIFT 12
+#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
+#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
+#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
+#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
+#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
+#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
+#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
+#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
+#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
+#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
+#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
+#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
+#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL 0x2218
+#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL 0x221C
+#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL 0x2220
+#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL 0x2214
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
+#define LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
+#define LOGICAL_PAGE_NUMBER_SHIFT 0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_AA_SAMPLE_LOCS_2S 0x8B40
+#define PA_SC_AA_SAMPLE_LOCS_4S 0x8B44
+#define PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8B48
+#define PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8B4C
+#define S0_X(x) ((x) << 0)
+#define S0_Y(x) ((x) << 4)
+#define S1_X(x) ((x) << 8)
+#define S1_Y(x) ((x) << 12)
+#define S2_X(x) ((x) << 16)
+#define S2_Y(x) ((x) << 20)
+#define S3_X(x) ((x) << 24)
+#define S3_Y(x) ((x) << 28)
+#define S4_X(x) ((x) << 0)
+#define S4_Y(x) ((x) << 4)
+#define S5_X(x) ((x) << 8)
+#define S5_Y(x) ((x) << 12)
+#define S6_X(x) ((x) << 16)
+#define S6_Y(x) ((x) << 20)
+#define S7_X(x) ((x) << 24)
+#define S7_Y(x) ((x) << 28)
+#define PA_SC_CLIPRECT_RULE 0x2820c
+#define PA_SC_ENHANCE 0x8BF0
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_SC_MODE_CNTL 0x28A4C
+#define PA_SC_MULTI_CHIP_CNTL 0x8B20
+
+#define PA_SC_SCREEN_SCISSOR_TL 0x28030
+#define PA_SC_GENERIC_SCISSOR_TL 0x28240
+#define PA_SC_WINDOW_SCISSOR_TL 0x28204
+
+#define PCIE_PORT_INDEX 0x0038
+#define PCIE_PORT_DATA 0x003C
+
+#define CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define RAMCFG 0x2408
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000001
+#define NOOFRANK_SHIFT 1
+#define NOOFRANK_MASK 0x00000002
+#define NOOFROWS_SHIFT 2
+#define NOOFROWS_MASK 0x0000001C
+#define NOOFCOLS_SHIFT 5
+#define NOOFCOLS_MASK 0x00000060
+#define CHANSIZE_SHIFT 7
+#define CHANSIZE_MASK 0x00000080
+#define BURSTLENGTH_SHIFT 8
+#define BURSTLENGTH_MASK 0x00000100
+#define CHANSIZE_OVERRIDE (1 << 10)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define DISABLE_INTERP_1 (1 << 5)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+#define SPI_PS_IN_CONTROL_1 0x286D0
+#define GEN_INDEX_PIX (1<<0)
+#define GEN_INDEX_PIX_ADDR(x) ((x)<<1)
+#define FRONT_FACE_ENA (1<<8)
+#define FRONT_FACE_CHAN(x) ((x)<<9)
+#define FRONT_FACE_ALL_BITS (1<<11)
+#define FRONT_FACE_ADDR(x) ((x)<<12)
+#define FOG_ADDR(x) ((x)<<17)
+#define FIXED_PT_POSITION_ENA (1<<24)
+#define FIXED_PT_POSITION_ADDR(x) ((x)<<25)
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_PGM_START_ES 0x28880
+#define SQ_PGM_START_FS 0x28894
+#define SQ_PGM_START_GS 0x2886C
+#define SQ_PGM_START_PS 0x28840
+#define SQ_PGM_RESOURCES_PS 0x28850
+#define SQ_PGM_EXPORTS_PS 0x28854
+#define SQ_PGM_CF_OFFSET_PS 0x288cc
+#define SQ_PGM_START_VS 0x28858
+#define SQ_PGM_RESOURCES_VS 0x28868
+#define SQ_PGM_CF_OFFSET_VS 0x288d0
+
+#define SQ_VTX_CONSTANT_WORD0_0 0x30000
+#define SQ_VTX_CONSTANT_WORD1_0 0x30004
+#define SQ_VTX_CONSTANT_WORD2_0 0x30008
+# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
+# define SQ_VTXC_STRIDE(x) ((x) << 8)
+# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
+# define SQ_ENDIAN_NONE 0
+# define SQ_ENDIAN_8IN16 1
+# define SQ_ENDIAN_8IN32 2
+#define SQ_VTX_CONSTANT_WORD3_0 0x3000c
+#define SQ_VTX_CONSTANT_WORD6_0 0x38018
+#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
+#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
+#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
+#define SQ_TEX_VTX_INVALID_BUFFER 0x1
+#define SQ_TEX_VTX_VALID_TEXTURE 0x2
+#define SQ_TEX_VTX_VALID_BUFFER 0x3
+
+
+#define SX_MISC 0x28350
+#define SX_MEMORY_EXPORT_BASE 0x9010
+#define SX_DEBUG_1 0x9054
+#define SMX_EVENT_RELEASE (1 << 0)
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+#define BILINEAR_PRECISION_6_BIT (0 << 31)
+#define BILINEAR_PRECISION_8_BIT (1 << 31)
+
+#define TC_CNTL 0x9608
+#define TC_L2_SIZE(x) ((x)<<5)
+#define L2_DISABLE_LATE_HIT (1<<9)
+
+#define VC_ENHANCE 0x9714
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x)<<0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define VGT_DMA_BASE 0x287E8
+#define VGT_DMA_BASE_HI 0x287E4
+#define VGT_ES_PER_GS 0x88CC
+#define VGT_GS_PER_ES 0x88C8
+#define VGT_GS_PER_VS 0x88E8
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_STRMOUT_BASE_OFFSET_0 0x28B10
+#define VGT_STRMOUT_BASE_OFFSET_1 0x28B14
+#define VGT_STRMOUT_BASE_OFFSET_2 0x28B18
+#define VGT_STRMOUT_BASE_OFFSET_3 0x28B1c
+#define VGT_STRMOUT_BASE_OFFSET_HI_0 0x28B44
+#define VGT_STRMOUT_BASE_OFFSET_HI_1 0x28B48
+#define VGT_STRMOUT_BASE_OFFSET_HI_2 0x28B4c
+#define VGT_STRMOUT_BASE_OFFSET_HI_3 0x28B50
+#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
+#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
+#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
+#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
+#define VGT_STRMOUT_BUFFER_OFFSET_0 0x28ADC
+#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
+#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
+#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
+#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
+
+#define VGT_STRMOUT_EN 0x28AB0
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490
+#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14B0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15B4
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1554
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 13)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT_0(x) (((x) & 0x1f) << 0)
+#define BANK_SELECT_1(x) (((x) & 0x1f) << 5)
+#define L2_CACHE_UPDATE_MODE(x) (((x) & 3) << 10)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+#define WAIT_2D_IDLE_bit (1 << 14)
+#define WAIT_3D_IDLE_bit (1 << 15)
+#define WAIT_2D_IDLECLEAN_bit (1 << 16)
+#define WAIT_3D_IDLECLEAN_bit (1 << 17)
+
+/* async DMA */
+#define DMA_TILING_CONFIG 0x3ec4
+#define DMA_CONFIG 0x3e4c
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
+#define DMA_MODE 0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
+#define DMA_PACKET_NOP 0xf
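+/* Illustrative encoding: DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0x10)
+ * yields (0x3 << 28) | 0x10 = 0x30000010, i.e. a COPY header with
+ * both flag bits clear and a count of 0x10. */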
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define RLC_CNTL 0x3f00
+# define RLC_ENABLE (1 << 0)
+#define RLC_HB_BASE 0x3f10
+#define RLC_HB_CNTL 0x3f0c
+#define RLC_HB_RPTR 0x3f20
+#define RLC_HB_WPTR 0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR 0x3f14
+#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
+#define RLC_MC_CNTL 0x3f44
+#define RLC_UCODE_CNTL 0x3f48
+#define RLC_UCODE_ADDR 0x3f2c
+#define RLC_UCODE_DATA 0x3f30
+
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
+
+#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_DMA (1 << 12)
+# define SOFT_RESET_RLC (1 << 13)
+# define SOFT_RESET_UVD (1 << 18)
+# define RV770_SOFT_RESET_DMA (1 << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define D1MODE_VBLANK_STATUS 0x6534
+#define D2MODE_VBLANK_STATUS 0x6d34
+# define DxMODE_VBLANK_OCCURRED (1 << 0)
+# define DxMODE_VBLANK_ACK (1 << 4)
+# define DxMODE_VBLANK_STAT (1 << 12)
+# define DxMODE_VBLANK_INTERRUPT (1 << 16)
+# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
+#define D1MODE_VLINE_STATUS 0x653c
+#define D2MODE_VLINE_STATUS 0x6d3c
+# define DxMODE_VLINE_OCCURRED (1 << 0)
+# define DxMODE_VLINE_ACK (1 << 4)
+# define DxMODE_VLINE_STAT (1 << 12)
+# define DxMODE_VLINE_INTERRUPT (1 << 16)
+# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
+#define DxMODE_INT_MASK 0x6540
+# define D1MODE_VBLANK_INT_MASK (1 << 0)
+# define D1MODE_VLINE_INT_MASK (1 << 4)
+# define D2MODE_VBLANK_INT_MASK (1 << 8)
+# define D2MODE_VLINE_INT_MASK (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
+# define DC_HPD1_INTERRUPT (1 << 18)
+# define DC_HPD2_INTERRUPT (1 << 19)
+#define DISP_INTERRUPT_STATUS 0x7edc
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VLINE_INTERRUPT (1 << 3)
+# define LB_D1_VBLANK_INTERRUPT (1 << 4)
+# define LB_D2_VBLANK_INTERRUPT (1 << 5)
+# define DACA_AUTODETECT_INTERRUPT (1 << 16)
+# define DACB_AUTODETECT_INTERRUPT (1 << 17)
+# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
+# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
+# define DC_HPD4_INTERRUPT (1 << 14)
+# define DC_HPD4_RX_INTERRUPT (1 << 15)
+# define DC_HPD3_INTERRUPT (1 << 28)
+# define DC_HPD1_RX_INTERRUPT (1 << 29)
+# define DC_HPD2_RX_INTERRUPT (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
+# define DC_HPD3_RX_INTERRUPT (1 << 0)
+# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
+# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
+# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
+# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
+# define AUX1_SW_DONE_INTERRUPT (1 << 5)
+# define AUX1_LS_DONE_INTERRUPT (1 << 6)
+# define AUX2_SW_DONE_INTERRUPT (1 << 7)
+# define AUX2_LS_DONE_INTERRUPT (1 << 8)
+# define AUX3_SW_DONE_INTERRUPT (1 << 9)
+# define AUX3_LS_DONE_INTERRUPT (1 << 10)
+# define AUX4_SW_DONE_INTERRUPT (1 << 11)
+# define AUX4_LS_DONE_INTERRUPT (1 << 12)
+# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
+# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
+/* DCE 3.2 */
+# define AUX5_SW_DONE_INTERRUPT (1 << 15)
+# define AUX5_LS_DONE_INTERRUPT (1 << 16)
+# define AUX6_SW_DONE_INTERRUPT (1 << 17)
+# define AUX6_LS_DONE_INTERRUPT (1 << 18)
+# define DC_HPD5_INTERRUPT (1 << 19)
+# define DC_HPD5_RX_INTERRUPT (1 << 20)
+# define DC_HPD6_INTERRUPT (1 << 21)
+# define DC_HPD6_RX_INTERRUPT (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL 0x7828
+#define DACB_AUTO_DETECT_CONTROL 0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
+# define DACx_AUTODETECT_MODE(x) ((x) << 0)
+# define DACx_AUTODETECT_MODE_NONE 0
+# define DACx_AUTODETECT_MODE_CONNECT 1
+# define DACx_AUTODETECT_MODE_DISCONNECT 2
+# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
+#define DACA_AUTODETECT_INT_CONTROL 0x7838
+#define DACB_AUTODETECT_INT_CONTROL 0x7a38
+# define DACx_AUTODETECT_ACK (1 << 0)
+# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
+# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
+# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
+# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS 0x7d00
+#define DC_HPD2_INT_STATUS 0x7d0c
+#define DC_HPD3_INT_STATUS 0x7d18
+#define DC_HPD4_INT_STATUS 0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS 0x7dc0
+#define DC_HPD6_INT_STATUS 0x7df4
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
+# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
+# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
+# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL 0x7d04
+#define DC_HPD2_INT_CONTROL 0x7d10
+#define DC_HPD3_INT_CONTROL 0x7d1c
+#define DC_HPD4_INT_CONTROL 0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL 0x7dc4
+#define DC_HPD6_INT_CONTROL 0x7df8
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL 0x7d08
+#define DC_HPD2_CONTROL 0x7d14
+#define DC_HPD3_CONTROL 0x7d20
+#define DC_HPD4_CONTROL 0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL 0x7dc8
+#define DC_HPD6_CONTROL 0x7dfc
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+/* DCE 3.2 */
+# define DC_HPDx_EN (1 << 28)
+
+#define D1GRPH_INTERRUPT_STATUS 0x6158
+#define D2GRPH_INTERRUPT_STATUS 0x6958
+# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL 0x615c
+#define D2GRPH_INTERRUPT_CONTROL 0x695c
+# define DxGRPH_PFLIP_INT_MASK (1 << 0)
+# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+# define LC_POINT_7_PLUS_EN (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO 0x7340
+# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
+# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
+
+/* Audio clocks DCE 3.2 */
+#define DCCG_AUDIO_DTO0_PHASE 0x0514
+#define DCCG_AUDIO_DTO0_MODULE 0x0518
+#define DCCG_AUDIO_DTO0_LOAD 0x051c
+# define DTO_LOAD (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL 0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE 0x0524
+#define DCCG_AUDIO_DTO1_MODULE 0x0528
+#define DCCG_AUDIO_DTO1_LOAD 0x052c
+#define DCCG_AUDIO_DTO1_CNTL 0x0530
+
+#define DCCG_AUDIO_DTO_SELECT 0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL 0x7880
+# define TMDSA_HDMI_EN (1 << 2)
+#define LVTMA_CNTL 0x7a80
+# define LVTMA_HDMI_EN (1 << 2)
+#define DDIA_CNTL 0x7200
+# define DDIA_HDMI_EN (1 << 2)
+#define DIG0_CNTL 0x75a0
+# define DIG_MODE(x) (((x) & 7) << 8)
+# define DIG_MODE_DP 0
+# define DIG_MODE_LVDS 1
+# define DIG_MODE_TMDS_DVI 2
+# define DIG_MODE_TMDS_HDMI 3
+# define DIG_MODE_SDVO 4
+#define DIG1_CNTL 0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has
+ * only one instance of the blocks while r6xx has 2. DCE 3.0 cards are
+ * slightly different due to the new DIG blocks, but also have 2
+ * instances. DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL 0x7400
+/* rs6xx/rs740/r6xx */
+# define HDMI0_ENABLE (1 << 0)
+# define HDMI0_STREAM(x) (((x) & 3) << 2)
+# define HDMI0_STREAM_TMDSA 0
+# define HDMI0_STREAM_LVTMA 1
+# define HDMI0_STREAM_DVOA 2
+# define HDMI0_STREAM_DDIA 3
+/* rs6xx/r6xx/dce3 */
+# define HDMI0_ERROR_ACK (1 << 8)
+# define HDMI0_ERROR_MASK (1 << 9)
+#define HDMI0_STATUS 0x7404
+# define HDMI0_ACTIVE_AVMUTE (1 << 0)
+# define HDMI0_AUDIO_ENABLE (1 << 4)
+# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
+# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
+# define HDMI0_AUDIO_TEST_EN (1 << 12)
+# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
+# define HDMI0_60958_CS_UPDATE (1 << 26)
+# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL 0x740c
+# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL 0x7410
+# define HDMI0_NULL_SEND (1 << 0)
+# define HDMI0_GC_SEND (1 << 4)
+# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0 0x7414
+# define HDMI0_AVI_INFO_SEND (1 << 0)
+# define HDMI0_AVI_INFO_CONT (1 << 1)
+# define HDMI0_AUDIO_INFO_SEND (1 << 4)
+# define HDMI0_AUDIO_INFO_CONT (1 << 5)
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+# define HDMI0_MPEG_INFO_SEND (1 << 8)
+# define HDMI0_MPEG_INFO_CONT (1 << 9)
+# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1 0x7418
+# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI0_GENERIC0_SEND (1 << 0)
+# define HDMI0_GENERIC0_CONT (1 << 1)
+# define HDMI0_GENERIC0_UPDATE (1 << 2)
+# define HDMI0_GENERIC1_SEND (1 << 4)
+# define HDMI0_GENERIC1_CONT (1 << 5)
+# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI0_GC 0x7428
+# define HDMI0_GC_AVMUTE (1 << 0)
+#define HDMI0_AVI_INFO0 0x7454
+# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
+# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
+# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
+# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define HDMI0_AVI_INFO_Y_RGB 0
+# define HDMI0_AVI_INFO_Y_YCBCR422 1
+# define HDMI0_AVI_INFO_Y_YCBCR444 2
+# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1 0x7458
+# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2 0x745c
+# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3 0x7460
+# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0 0x7464
+# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1 0x7468
+# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR 0x746c
+#define HDMI0_GENERIC0_0 0x7470
+#define HDMI0_GENERIC0_1 0x7474
+#define HDMI0_GENERIC0_2 0x7478
+#define HDMI0_GENERIC0_3 0x747c
+#define HDMI0_GENERIC0_4 0x7480
+#define HDMI0_GENERIC0_5 0x7484
+#define HDMI0_GENERIC0_6 0x7488
+#define HDMI0_GENERIC1_HDR 0x748c
+#define HDMI0_GENERIC1_0 0x7490
+#define HDMI0_GENERIC1_1 0x7494
+#define HDMI0_GENERIC1_2 0x7498
+#define HDMI0_GENERIC1_3 0x749c
+#define HDMI0_GENERIC1_4 0x74a0
+#define HDMI0_GENERIC1_5 0x74a4
+#define HDMI0_GENERIC1_6 0x74a8
+#define HDMI0_ACR_32_0 0x74ac
+# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1 0x74b0
+# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0 0x74b4
+# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1 0x74b8
+# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0 0x74bc
+# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1 0x74c0
+# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_STATUS_0 0x74c4
+#define HDMI0_ACR_STATUS_1 0x74c8
+#define HDMI0_AUDIO_INFO0 0x74cc
+# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1 0x74d0
+# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define HDMI0_60958_0 0x74d4
+# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
+# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
+# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
+# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
+# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
+# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define HDMI0_60958_1 0x74d8
+# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL 0x74dc
+# define HDMI0_ACR_SEND (1 << 0)
+# define HDMI0_ACR_CONT (1 << 1)
+# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI0_ACR_HW 0
+# define HDMI0_ACR_32 1
+# define HDMI0_ACR_44 2
+# define HDMI0_ACR_48 3
+# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define HDMI0_RAMP_CONTROL0 0x74e0
+# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1 0x74e4
+# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2 0x74e8
+# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3 0x74ec
+# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2 0x74f0
+# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL 0x7700
+#define HDMI1_STATUS 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL 0x7800
+#define DCE3_HDMI1_STATUS 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xef00
+#define UVD_SEMA_ADDR_HIGH 0xef04
+#define UVD_SEMA_CMD 0xef08
+
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+#define UVD_ENGINE_CNTL 0xef18
+
+#define UVD_SEMA_CNTL 0xf400
+#define UVD_RB_ARB_CTRL 0xf480
+
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_CGC_GATE 0xf4a8
+#define UVD_LMI_CTRL2 0xf4f4
+#define UVD_MASTINT_EN 0xf500
+#define UVD_LMI_ADDR_EXT 0xf594
+#define UVD_LMI_CTRL 0xf598
+#define UVD_LMI_SWAP_CNTL 0xf5b4
+#define UVD_MP_SWAP_CNTL 0xf5bc
+#define UVD_MPC_CNTL 0xf5dc
+#define UVD_MPC_SET_MUXA0 0xf5e4
+#define UVD_MPC_SET_MUXA1 0xf5e8
+#define UVD_MPC_SET_MUXB0 0xf5ec
+#define UVD_MPC_SET_MUXB1 0xf5f0
+#define UVD_MPC_SET_MUX 0xf5f4
+#define UVD_MPC_SET_ALU 0xf5f8
+
+#define UVD_VCPU_CNTL 0xf660
+#define UVD_SOFT_RESET 0xf680
+#define RBC_SOFT_RESET (1 << 0)
+#define LBSI_SOFT_RESET (1 << 1)
+#define LMI_SOFT_RESET (1 << 2)
+#define VCPU_SOFT_RESET (1 << 3)
+#define CSM_SOFT_RESET (1 << 5)
+#define CXW_SOFT_RESET (1 << 6)
+#define TAP_SOFT_RESET (1 << 7)
+#define LMI_UMC_SOFT_RESET (1 << 13)
+#define UVD_RBC_IB_BASE 0xf684
+#define UVD_RBC_IB_SIZE 0xf688
+#define UVD_RBC_RB_BASE 0xf68c
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_RBC_RB_WPTR_CNTL 0xf698
+
+#define UVD_STATUS 0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
+
+#define UVD_RBC_RB_CNTL 0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
+
+#define UVD_CONTEXT_ID 0xf6f4
+
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
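+/* Usage sketch (illustrative, not part of this header): for both packet
+ * types, n is "number of body dwords - 1". A type-0 packet writes n + 1
+ * consecutive registers starting at reg; a type-3 packet carries an
+ * (n + 1)-dword body for opcode op. A single config register write is
+ * typically emitted as:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ *	radeon_ring_write(ring, (reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ *	radeon_ring_write(ring, value);
+ *
+ * radeon_ring_write() lives elsewhere in the driver.
+ */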
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_START_3D_CMDBUF 0x24
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_IMMD_BE 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_INDIRECT_BUFFER_MP 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
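+/* Illustrative emit of the 6-dword layout above (sketch only; the ring
+ * helper and the 64-bit src/dst addresses are assumed, not defined here):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *	radeon_ring_write(ring, lower_32_bits(src));
+ *	radeon_ring_write(ring, PACKET3_CP_DMA_CP_SYNC | (upper_32_bits(src) & 0xff));
+ *	radeon_ring_write(ring, lower_32_bits(dst));
+ *	radeon_ring_write(ring, upper_32_bits(dst) & 0xff);
+ *	radeon_ring_write(ring, byte_count);	(COMMAND bits OR'ed into [29:22])
+ */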
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_VC_ACTION_ENA (1 << 24)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SMX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_OFFSET 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+#define PACKET3_SET_ALU_CONST_OFFSET 0x00030000
+#define PACKET3_SET_ALU_CONST_END 0x00032000
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_OFFSET 0x0003e380
+#define PACKET3_SET_BOOL_CONST_END 0x00040000
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_OFFSET 0x0003e200
+#define PACKET3_SET_LOOP_CONST_END 0x0003e380
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_OFFSET 0x00038000
+#define PACKET3_SET_RESOURCE_END 0x0003c000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_OFFSET 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003cff0
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003e200
+#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
+#define PACKET3_SURFACE_BASE_UPDATE 0x73
+
+#define R_000011_K8_FB_LOCATION 0x11
+#define R_000012_MC_MISC_UMA_CNTL 0x12
+#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX 0x28F8
+#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
+#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA 0x28FC
+
+#define R_008020_GRBM_SOFT_RESET 0x8020
+#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
+#define S_008020_SOFT_RESET_CB(x) (((x) & 1) << 1)
+#define S_008020_SOFT_RESET_CR(x) (((x) & 1) << 2)
+#define S_008020_SOFT_RESET_DB(x) (((x) & 1) << 3)
+#define S_008020_SOFT_RESET_PA(x) (((x) & 1) << 5)
+#define S_008020_SOFT_RESET_SC(x) (((x) & 1) << 6)
+#define S_008020_SOFT_RESET_SMX(x) (((x) & 1) << 7)
+#define S_008020_SOFT_RESET_SPI(x) (((x) & 1) << 8)
+#define S_008020_SOFT_RESET_SH(x) (((x) & 1) << 9)
+#define S_008020_SOFT_RESET_SX(x) (((x) & 1) << 10)
+#define S_008020_SOFT_RESET_TC(x) (((x) & 1) << 11)
+#define S_008020_SOFT_RESET_TA(x) (((x) & 1) << 12)
+#define S_008020_SOFT_RESET_VC(x) (((x) & 1) << 13)
+#define S_008020_SOFT_RESET_VGT(x) (((x) & 1) << 14)
+#define R_008010_GRBM_STATUS 0x8010
+#define S_008010_CMDFIFO_AVAIL(x) (((x) & 0x1F) << 0)
+#define S_008010_CP_RQ_PENDING(x) (((x) & 1) << 6)
+#define S_008010_CF_RQ_PENDING(x) (((x) & 1) << 7)
+#define S_008010_PF_RQ_PENDING(x) (((x) & 1) << 8)
+#define S_008010_GRBM_EE_BUSY(x) (((x) & 1) << 10)
+#define S_008010_VC_BUSY(x) (((x) & 1) << 11)
+#define S_008010_DB03_CLEAN(x) (((x) & 1) << 12)
+#define S_008010_CB03_CLEAN(x) (((x) & 1) << 13)
+#define S_008010_VGT_BUSY_NO_DMA(x) (((x) & 1) << 16)
+#define S_008010_VGT_BUSY(x) (((x) & 1) << 17)
+#define S_008010_TA03_BUSY(x) (((x) & 1) << 18)
+#define S_008010_TC_BUSY(x) (((x) & 1) << 19)
+#define S_008010_SX_BUSY(x) (((x) & 1) << 20)
+#define S_008010_SH_BUSY(x) (((x) & 1) << 21)
+#define S_008010_SPI03_BUSY(x) (((x) & 1) << 22)
+#define S_008010_SMX_BUSY(x) (((x) & 1) << 23)
+#define S_008010_SC_BUSY(x) (((x) & 1) << 24)
+#define S_008010_PA_BUSY(x) (((x) & 1) << 25)
+#define S_008010_DB03_BUSY(x) (((x) & 1) << 26)
+#define S_008010_CR_BUSY(x) (((x) & 1) << 27)
+#define S_008010_CP_COHERENCY_BUSY(x) (((x) & 1) << 28)
+#define S_008010_CP_BUSY(x) (((x) & 1) << 29)
+#define S_008010_CB03_BUSY(x) (((x) & 1) << 30)
+#define S_008010_GUI_ACTIVE(x) (((x) & 1) << 31)
+#define G_008010_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x1F)
+#define G_008010_CP_RQ_PENDING(x) (((x) >> 6) & 1)
+#define G_008010_CF_RQ_PENDING(x) (((x) >> 7) & 1)
+#define G_008010_PF_RQ_PENDING(x) (((x) >> 8) & 1)
+#define G_008010_GRBM_EE_BUSY(x) (((x) >> 10) & 1)
+#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
+#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
+#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
+#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
+#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
+#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
+#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
+#define G_008010_TC_BUSY(x) (((x) >> 19) & 1)
+#define G_008010_SX_BUSY(x) (((x) >> 20) & 1)
+#define G_008010_SH_BUSY(x) (((x) >> 21) & 1)
+#define G_008010_SPI03_BUSY(x) (((x) >> 22) & 1)
+#define G_008010_SMX_BUSY(x) (((x) >> 23) & 1)
+#define G_008010_SC_BUSY(x) (((x) >> 24) & 1)
+#define G_008010_PA_BUSY(x) (((x) >> 25) & 1)
+#define G_008010_DB03_BUSY(x) (((x) >> 26) & 1)
+#define G_008010_CR_BUSY(x) (((x) >> 27) & 1)
+#define G_008010_CP_COHERENCY_BUSY(x) (((x) >> 28) & 1)
+#define G_008010_CP_BUSY(x) (((x) >> 29) & 1)
+#define G_008010_CB03_BUSY(x) (((x) >> 30) & 1)
+#define G_008010_GUI_ACTIVE(x) (((x) >> 31) & 1)
+#define R_008014_GRBM_STATUS2 0x8014
+#define S_008014_CR_CLEAN(x) (((x) & 1) << 0)
+#define S_008014_SMX_CLEAN(x) (((x) & 1) << 1)
+#define S_008014_SPI0_BUSY(x) (((x) & 1) << 8)
+#define S_008014_SPI1_BUSY(x) (((x) & 1) << 9)
+#define S_008014_SPI2_BUSY(x) (((x) & 1) << 10)
+#define S_008014_SPI3_BUSY(x) (((x) & 1) << 11)
+#define S_008014_TA0_BUSY(x) (((x) & 1) << 12)
+#define S_008014_TA1_BUSY(x) (((x) & 1) << 13)
+#define S_008014_TA2_BUSY(x) (((x) & 1) << 14)
+#define S_008014_TA3_BUSY(x) (((x) & 1) << 15)
+#define S_008014_DB0_BUSY(x) (((x) & 1) << 16)
+#define S_008014_DB1_BUSY(x) (((x) & 1) << 17)
+#define S_008014_DB2_BUSY(x) (((x) & 1) << 18)
+#define S_008014_DB3_BUSY(x) (((x) & 1) << 19)
+#define S_008014_CB0_BUSY(x) (((x) & 1) << 20)
+#define S_008014_CB1_BUSY(x) (((x) & 1) << 21)
+#define S_008014_CB2_BUSY(x) (((x) & 1) << 22)
+#define S_008014_CB3_BUSY(x) (((x) & 1) << 23)
+#define G_008014_CR_CLEAN(x) (((x) >> 0) & 1)
+#define G_008014_SMX_CLEAN(x) (((x) >> 1) & 1)
+#define G_008014_SPI0_BUSY(x) (((x) >> 8) & 1)
+#define G_008014_SPI1_BUSY(x) (((x) >> 9) & 1)
+#define G_008014_SPI2_BUSY(x) (((x) >> 10) & 1)
+#define G_008014_SPI3_BUSY(x) (((x) >> 11) & 1)
+#define G_008014_TA0_BUSY(x) (((x) >> 12) & 1)
+#define G_008014_TA1_BUSY(x) (((x) >> 13) & 1)
+#define G_008014_TA2_BUSY(x) (((x) >> 14) & 1)
+#define G_008014_TA3_BUSY(x) (((x) >> 15) & 1)
+#define G_008014_DB0_BUSY(x) (((x) >> 16) & 1)
+#define G_008014_DB1_BUSY(x) (((x) >> 17) & 1)
+#define G_008014_DB2_BUSY(x) (((x) >> 18) & 1)
+#define G_008014_DB3_BUSY(x) (((x) >> 19) & 1)
+#define G_008014_CB0_BUSY(x) (((x) >> 20) & 1)
+#define G_008014_CB1_BUSY(x) (((x) >> 21) & 1)
+#define G_008014_CB2_BUSY(x) (((x) >> 22) & 1)
+#define G_008014_CB3_BUSY(x) (((x) >> 23) & 1)
+#define R_000E50_SRBM_STATUS 0x0E50
+#define G_000E50_RLC_RQ_PENDING(x) (((x) >> 3) & 1)
+#define G_000E50_RCU_RQ_PENDING(x) (((x) >> 4) & 1)
+#define G_000E50_GRBM_RQ_PENDING(x) (((x) >> 5) & 1)
+#define G_000E50_HI_RQ_PENDING(x) (((x) >> 6) & 1)
+#define G_000E50_IO_EXTERN_SIGNAL(x) (((x) >> 7) & 1)
+#define G_000E50_VMC_BUSY(x) (((x) >> 8) & 1)
+#define G_000E50_MCB_BUSY(x) (((x) >> 9) & 1)
+#define G_000E50_MCDZ_BUSY(x) (((x) >> 10) & 1)
+#define G_000E50_MCDY_BUSY(x) (((x) >> 11) & 1)
+#define G_000E50_MCDX_BUSY(x) (((x) >> 12) & 1)
+#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
+#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
+#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
+#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
+#define R_000E60_SRBM_SOFT_RESET 0x0E60
+#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
+#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
+#define S_000E60_SOFT_RESET_CMC(x) (((x) & 1) << 3)
+#define S_000E60_SOFT_RESET_CSC(x) (((x) & 1) << 4)
+#define S_000E60_SOFT_RESET_DC(x) (((x) & 1) << 5)
+#define S_000E60_SOFT_RESET_GRBM(x) (((x) & 1) << 8)
+#define S_000E60_SOFT_RESET_HDP(x) (((x) & 1) << 9)
+#define S_000E60_SOFT_RESET_IH(x) (((x) & 1) << 10)
+#define S_000E60_SOFT_RESET_MC(x) (((x) & 1) << 11)
+#define S_000E60_SOFT_RESET_RLC(x) (((x) & 1) << 13)
+#define S_000E60_SOFT_RESET_ROM(x) (((x) & 1) << 14)
+#define S_000E60_SOFT_RESET_SEM(x) (((x) & 1) << 15)
+#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
+#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_028C04_PA_SC_AA_CONFIG 0x028C04
+#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
+#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
+#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
+#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
+#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
+#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
+#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
+#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
+#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+#define R_0280A0_CB_COLOR0_INFO 0x0280A0
+#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_0280A0_ENDIAN 0xFFFFFFFC
+#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_0280A0_FORMAT 0xFFFFFF03
+#define V_0280A0_COLOR_INVALID 0x00000000
+#define V_0280A0_COLOR_8 0x00000001
+#define V_0280A0_COLOR_4_4 0x00000002
+#define V_0280A0_COLOR_3_3_2 0x00000003
+#define V_0280A0_COLOR_16 0x00000005
+#define V_0280A0_COLOR_16_FLOAT 0x00000006
+#define V_0280A0_COLOR_8_8 0x00000007
+#define V_0280A0_COLOR_5_6_5 0x00000008
+#define V_0280A0_COLOR_6_5_5 0x00000009
+#define V_0280A0_COLOR_1_5_5_5 0x0000000A
+#define V_0280A0_COLOR_4_4_4_4 0x0000000B
+#define V_0280A0_COLOR_5_5_5_1 0x0000000C
+#define V_0280A0_COLOR_32 0x0000000D
+#define V_0280A0_COLOR_32_FLOAT 0x0000000E
+#define V_0280A0_COLOR_16_16 0x0000000F
+#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
+#define V_0280A0_COLOR_8_24 0x00000011
+#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
+#define V_0280A0_COLOR_24_8 0x00000013
+#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
+#define V_0280A0_COLOR_10_11_11 0x00000015
+#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
+#define V_0280A0_COLOR_11_11_10 0x00000017
+#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
+#define V_0280A0_COLOR_2_10_10_10 0x00000019
+#define V_0280A0_COLOR_8_8_8_8 0x0000001A
+#define V_0280A0_COLOR_10_10_10_2 0x0000001B
+#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_0280A0_COLOR_32_32 0x0000001D
+#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
+#define V_0280A0_COLOR_16_16_16_16 0x0000001F
+#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_0280A0_COLOR_32_32_32_32 0x00000022
+#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
+#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
+#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
+#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
+#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
+#define C_0280A0_READ_SIZE 0xFFFF7FFF
+#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
+#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
+#define C_0280A0_COMP_SWAP 0xFFFCFFFF
+#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
+#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
+#define C_0280A0_TILE_MODE 0xFFF3FFFF
+#define V_0280A0_TILE_DISABLE 0
+#define V_0280A0_CLEAR_ENABLE 1
+#define V_0280A0_FRAG_ENABLE 2
+#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
+#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
+#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
+#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
+#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
+#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
+#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
+#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
+#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
+#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
+#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
+#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
+#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
+#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
+#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
+#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
+#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
+#define C_0280A0_ROUND_MODE 0xFDFFFFFF
+#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
+#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
+#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
+#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO 0x0280A4
+#define R_0280A8_CB_COLOR2_INFO 0x0280A8
+#define R_0280AC_CB_COLOR3_INFO 0x0280AC
+#define R_0280B0_CB_COLOR4_INFO 0x0280B0
+#define R_0280B4_CB_COLOR5_INFO 0x0280B4
+#define R_0280B8_CB_COLOR6_INFO 0x0280B8
+#define R_0280BC_CB_COLOR7_INFO 0x0280BC
+#define R_028060_CB_COLOR0_SIZE 0x028060
+#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028060_SLICE_TILE_MAX 0xC00003FF
+#define R_028064_CB_COLOR1_SIZE 0x028064
+#define R_028068_CB_COLOR2_SIZE 0x028068
+#define R_02806C_CB_COLOR3_SIZE 0x02806C
+#define R_028070_CB_COLOR4_SIZE 0x028070
+#define R_028074_CB_COLOR5_SIZE 0x028074
+#define R_028078_CB_COLOR6_SIZE 0x028078
+#define R_02807C_CB_COLOR7_SIZE 0x02807C
+#define R_028238_CB_TARGET_MASK 0x028238
+#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
+#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
+#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
+#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
+#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
+#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
+#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
+#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK 0x02823C
+#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
+#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
+#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
+#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
+#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
+#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
+#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
+#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
+#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
+#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
+#define C_028AB0_STREAMOUT 0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
+#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
+#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
+#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
+#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
+#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
+#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
+#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
+#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
+#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
+#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
+#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
+#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
+#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_028B20_SIZE 0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
+#define S_038000_DIM(x) (((x) & 0x7) << 0)
+#define G_038000_DIM(x) (((x) >> 0) & 0x7)
+#define C_038000_DIM 0xFFFFFFF8
+#define V_038000_SQ_TEX_DIM_1D 0x00000000
+#define V_038000_SQ_TEX_DIM_2D 0x00000001
+#define V_038000_SQ_TEX_DIM_3D 0x00000002
+#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
+#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
+#define C_038000_TILE_MODE 0xFFFFFF87
+#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
+#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
+#define C_038000_TILE_TYPE 0xFFFFFF7F
+#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
+#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
+#define C_038000_PITCH 0xFFF800FF
+#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
+#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
+#define C_038000_TEX_WIDTH 0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
+#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
+#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
+#define C_038004_TEX_HEIGHT 0xFFFFE000
+#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
+#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
+#define C_038004_TEX_DEPTH 0xFC001FFF
+#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
+#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
+#define C_038004_DATA_FORMAT 0x03FFFFFF
+#define V_038004_COLOR_INVALID 0x00000000
+#define V_038004_COLOR_8 0x00000001
+#define V_038004_COLOR_4_4 0x00000002
+#define V_038004_COLOR_3_3_2 0x00000003
+#define V_038004_COLOR_16 0x00000005
+#define V_038004_COLOR_16_FLOAT 0x00000006
+#define V_038004_COLOR_8_8 0x00000007
+#define V_038004_COLOR_5_6_5 0x00000008
+#define V_038004_COLOR_6_5_5 0x00000009
+#define V_038004_COLOR_1_5_5_5 0x0000000A
+#define V_038004_COLOR_4_4_4_4 0x0000000B
+#define V_038004_COLOR_5_5_5_1 0x0000000C
+#define V_038004_COLOR_32 0x0000000D
+#define V_038004_COLOR_32_FLOAT 0x0000000E
+#define V_038004_COLOR_16_16 0x0000000F
+#define V_038004_COLOR_16_16_FLOAT 0x00000010
+#define V_038004_COLOR_8_24 0x00000011
+#define V_038004_COLOR_8_24_FLOAT 0x00000012
+#define V_038004_COLOR_24_8 0x00000013
+#define V_038004_COLOR_24_8_FLOAT 0x00000014
+#define V_038004_COLOR_10_11_11 0x00000015
+#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
+#define V_038004_COLOR_11_11_10 0x00000017
+#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
+#define V_038004_COLOR_2_10_10_10 0x00000019
+#define V_038004_COLOR_8_8_8_8 0x0000001A
+#define V_038004_COLOR_10_10_10_2 0x0000001B
+#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_038004_COLOR_32_32 0x0000001D
+#define V_038004_COLOR_32_32_FLOAT 0x0000001E
+#define V_038004_COLOR_16_16_16_16 0x0000001F
+#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_038004_COLOR_32_32_32_32 0x00000022
+#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_038004_FMT_1 0x00000025
+#define V_038004_FMT_GB_GR 0x00000027
+#define V_038004_FMT_BG_RG 0x00000028
+#define V_038004_FMT_32_AS_8 0x00000029
+#define V_038004_FMT_32_AS_8_8 0x0000002A
+#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
+#define V_038004_FMT_8_8_8 0x0000002C
+#define V_038004_FMT_16_16_16 0x0000002D
+#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
+#define V_038004_FMT_32_32_32 0x0000002F
+#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define V_038004_FMT_BC1 0x00000031
+#define V_038004_FMT_BC2 0x00000032
+#define V_038004_FMT_BC3 0x00000033
+#define V_038004_FMT_BC4 0x00000034
+#define V_038004_FMT_BC5 0x00000035
+#define V_038004_FMT_BC6 0x00000036
+#define V_038004_FMT_BC7 0x00000037
+#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
+#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
+#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
+#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
+#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
+#define C_038010_REQUEST_SIZE 0xFFFF3FFF
+#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_038010_DST_SEL_X 0xFFF8FFFF
+#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_038010_DST_SEL_Y 0xFFC7FFFF
+#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_038010_DST_SEL_Z 0xFE3FFFFF
+#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_038010_DST_SEL_W 0xF1FFFFFF
+# define SQ_SEL_X 0
+# define SQ_SEL_Y 1
+# define SQ_SEL_Z 2
+# define SQ_SEL_W 3
+# define SQ_SEL_0 4
+# define SQ_SEL_1 5
+#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_038010_BASE_LEVEL 0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
+#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_038014_LAST_LEVEL 0xFFFFFFF0
+#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_038014_BASE_ARRAY 0xFFFE000F
+#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_038014_LAST_ARRAY 0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
+#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288A8_ITEMSIZE 0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
+#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C44_MEM_SIZE 0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
+#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B0_ITEMSIZE 0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
+#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C54_MEM_SIZE 0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
+#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C0_ITEMSIZE 0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
+#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C74_MEM_SIZE 0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
+#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B4_ITEMSIZE 0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
+#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C5C_MEM_SIZE 0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
+#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288AC_ITEMSIZE 0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
+#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C4C_MEM_SIZE 0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
+#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288BC_ITEMSIZE 0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
+#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C6C_MEM_SIZE 0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
+#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C4_ITEMSIZE 0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
+#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C7C_MEM_SIZE 0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
+#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B8_ITEMSIZE 0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
+#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C64_MEM_SIZE 0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
+#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C8_ITEMSIZE 0xFFFF8000
+#define R_028010_DB_DEPTH_INFO 0x028010
+#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
+#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
+#define C_028010_FORMAT 0xFFFFFFF8
+#define V_028010_DEPTH_INVALID 0x00000000
+#define V_028010_DEPTH_16 0x00000001
+#define V_028010_DEPTH_X8_24 0x00000002
+#define V_028010_DEPTH_8_24 0x00000003
+#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
+#define V_028010_DEPTH_8_24_FLOAT 0x00000005
+#define V_028010_DEPTH_32_FLOAT 0x00000006
+#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
+#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
+#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
+#define C_028010_READ_SIZE 0xFFFFFFF7
+#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
+#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
+#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
+#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
+#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
+#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_028010_TILE_COMPACT 0xFBFFFFFF
+#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE 0x028000
+#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028000_SLICE_TILE_MAX 0xC00003FF
+#define R_028004_DB_DEPTH_VIEW 0x028004
+#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028004_SLICE_START 0xFFFFF800
+#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028004_SLICE_MAX 0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 0000000..d4ff48c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,2079 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+/* TODO: Here are things that need to be done:
+ * - surface allocator & initializer: (a bit like scratch regs) should
+ * initialize the HDP_ stuff on RS600, R600, R700 hw, well anything
+ * related to surfaces
+ * - WB: write back stuff (do it a bit like the scratch reg things)
+ * - Vblank: look at Jesse's rework and what we should do
+ * - r600/r700: gart & cp
+ * - cs: clean up the cs ioctl to use a bitmap & things like that.
+ * - power management stuff
+ * - Barrier in gart code
+ * - Unmappable vram ?
+ * - TESTING, TESTING, TESTING
+ */
+
+/* Initialization path:
+ * We expect that acceleration initialization might fail for various
+ * reasons even though we work hard to make it work on most
+ * configurations. In order to still have a working userspace in such
+ * a situation, the init path must succeed up to the memory controller
+ * initialization point. Failures before this point are considered
+ * fatal errors. Here is the init callchain:
+ * radeon_device_init performs common structure and mutex initialization
+ * asic_init sets up the GPU memory layout and performs all
+ * one-time initialization (failures in this
+ * function are considered fatal)
+ * asic_startup sets up GPU acceleration; to follow the
+ * guideline, the first thing this function
+ * should do is set up the GPU memory
+ * controller (only MC setup failures are
+ * considered fatal)
+ */
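+
+/* A minimal sketch of the guideline above (the helpers are hypothetical;
+ * real asics wire their startup callbacks up through radeon_asic.c):
+ */
+#if 0
+static int example_asic_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* the MC must be set up first; this is the only fatal step here */
+	r = example_mc_setup(rdev);
+	if (r)
+		return r;
+	/* acceleration may fail without killing the init path */
+	r = example_accel_setup(rdev);
+	if (r)
+		DRM_ERROR("acceleration init failed, continuing without it\n");
+	return 0;
+}
+#endif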
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
+
+#include "radeon_family.h"
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+/*
+ * Modules parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_testing;
+extern int radeon_connector_table;
+extern int radeon_tv;
+extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
+extern int radeon_msi;
+extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
+
+/*
+ * Copied from radeon_drv.h so we don't have to include both and have
+ * conflicting symbols.
+ */
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEONFB_CONN_LIMIT 4
+#define RADEON_BIOS_NUM_SCRATCH 8
+
+/* max number of rings */
+#define RADEON_NUM_RINGS 6
+
+/* fence sequence numbers are set to this value when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX 0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX 3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+
+/* R600+ */
+#define R600_RING_TYPE_UVD_INDEX 5
+
+/* hardcode these limits for now */
+#define RADEON_VA_IB_OFFSET (1 << 20)
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+
+/* reset flags */
+#define RADEON_RESET_GFX (1 << 0)
+#define RADEON_RESET_COMPUTE (1 << 1)
+#define RADEON_RESET_DMA (1 << 2)
+#define RADEON_RESET_CP (1 << 3)
+#define RADEON_RESET_GRBM (1 << 4)
+#define RADEON_RESET_DMA1 (1 << 5)
+#define RADEON_RESET_RLC (1 << 6)
+#define RADEON_RESET_SEM (1 << 7)
+#define RADEON_RESET_IH (1 << 8)
+#define RADEON_RESET_VMC (1 << 9)
+#define RADEON_RESET_MC (1 << 10)
+#define RADEON_RESET_DISPLAY (1 << 11)
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+ CHIP_ERRATA_R300_CG = 0x00000001,
+ CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
+ CHIP_ERRATA_PLL_DELAY = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Dummy page
+ */
+struct radeon_dummy_page {
+ struct page *page;
+ dma_addr_t addr;
+};
+int radeon_dummy_page_init(struct radeon_device *rdev);
+void radeon_dummy_page_fini(struct radeon_device *rdev);
+
+
+/*
+ * Clocks
+ */
+struct radeon_clock {
+ struct radeon_pll p1pll;
+ struct radeon_pll p2pll;
+ struct radeon_pll dcpll;
+ struct radeon_pll spll;
+ struct radeon_pll mpll;
+ /* 10 kHz units */
+ uint32_t default_mclk;
+ uint32_t default_sclk;
+ uint32_t default_dispclk;
+ uint32_t dp_extclk;
+ uint32_t max_pixel_clock;
+};
+
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+void rs690_pm_info(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
+extern int si_get_temp(struct radeon_device *rdev);
+extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+ unsigned *bankh, unsigned *mtaspect,
+ unsigned *tile_split);
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+ uint32_t scratch_reg;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+ uint64_t sync_seq[RADEON_NUM_RINGS];
+ atomic64_t last_seq;
+ unsigned long last_activity;
+ bool initialized;
+};
+
+struct radeon_fence {
+ struct radeon_device *rdev;
+ struct kref kref;
+ /* protected by radeon_fence.lock */
+ uint64_t seq;
+ /* RB, DMA, etc. */
+ unsigned ring;
+};
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+ struct radeon_fence *b)
+{
+ if (!a) {
+ return b;
+ }
+
+ if (!b) {
+ return a;
+ }
+
+ BUG_ON(a->ring != b->ring);
+
+ if (a->seq > b->seq) {
+ return a;
+ } else {
+ return b;
+ }
+}
+
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+ struct radeon_fence *b)
+{
+ if (!a) {
+ return false;
+ }
+
+ if (!b) {
+ return true;
+ }
+
+ BUG_ON(a->ring != b->ring);
+
+ return a->seq < b->seq;
+}
+
+/*
+ * Tiling registers
+ */
+struct radeon_surface_reg {
+ struct radeon_bo *bo;
+};
+
+#define RADEON_GEM_MAX_SURFACES 8
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
+};
+
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+ /* protected by bo being reserved */
+ struct list_head bo_list;
+ uint64_t soffset;
+ uint64_t eoffset;
+ uint32_t flags;
+ bool valid;
+ unsigned ref_count;
+
+ /* protected by vm mutex */
+ struct list_head vm_list;
+
+ /* constant after initialization */
+ struct radeon_vm *vm;
+ struct radeon_bo *bo;
+};
+
+struct radeon_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ u32 placements[3];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ u32 tiling_flags;
+ u32 pitch;
+ int surface_reg;
+ /* list of all virtual addresses to which this bo
+ * is associated
+ */
+ struct list_head va;
+ /* Constant after initialization */
+ struct radeon_device *rdev;
+ struct drm_gem_object gem_base;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ pid_t pid;
+};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+
+struct radeon_bo_list {
+ struct ttm_validate_buffer tv;
+ struct radeon_bo *bo;
+ uint64_t gpu_offset;
+ bool written;
+ unsigned domain;
+ unsigned alt_domain;
+ u32 tiling_flags;
+};
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
+/* sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver,
+ * like the indirect buffers or semaphores, which both have their
+ * own locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the
+ * highest offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting on
+ * each sub object until we reach object_offset + object_size >=
+ * alloc_size; this object then becomes the sub object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be holes (all objects use the
+ * same alignment).
+ */
+struct radeon_sa_manager {
+ wait_queue_head_t wq;
+ struct radeon_bo *bo;
+ struct list_head *hole;
+ struct list_head flist[RADEON_NUM_RINGS];
+ struct list_head olist;
+ unsigned size;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ uint32_t domain;
+ uint32_t align;
+};
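+
+/* A minimal sketch of the end-of-buffer check described above (the helper
+ * and the last_end parameter are hypothetical; the real allocator walks
+ * olist/hole):
+ */
+#if 0
+static bool example_sa_fits_at_end(struct radeon_sa_manager *sa,
+				   unsigned last_end, unsigned alloc_size)
+{
+	/* last_end == last_object_offset + last_object_size */
+	return sa->size - last_end >= alloc_size;
+}
+#endif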
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+ struct list_head olist;
+ struct list_head flist;
+ struct radeon_sa_manager *manager;
+ unsigned soffset;
+ unsigned eoffset;
+ struct radeon_fence *fence;
+};
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+ struct mutex mutex;
+ struct list_head objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj);
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+/*
+ * Semaphores.
+ */
+/* everything here is constant */
+struct radeon_semaphore {
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
+ uint64_t gpu_addr;
+};
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ int signaler, int waiter);
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore,
+ struct radeon_fence *fence);
+
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
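+/* e.g. RADEON_GPU_PAGE_ALIGN(5000) == 8192: sizes round up to the next
+ * 4KB GPU page boundary */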
+
+struct radeon_gart {
+ dma_addr_t table_addr;
+ struct radeon_bo *robj;
+ void *ptr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+ unsigned table_size;
+ struct page **pages;
+ dma_addr_t *pages_addr;
+ bool ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+ resource_size_t aper_size;
+ resource_size_t aper_base;
+ resource_size_t agp_base;
+ /* for some chips with <= 32MB we need to lie
+ * about vram size near mc fb location */
+ u64 mc_vram_size;
+ u64 visible_vram_size;
+ u64 gtt_size;
+ u64 gtt_start;
+ u64 gtt_end;
+ u64 vram_start;
+ u64 vram_end;
+ unsigned vram_width;
+ u64 real_vram_size;
+ int vram_mtrr;
+ bool vram_is_ddr;
+ bool igp_sideport_enabled;
+ u64 gtt_base_align;
+ u64 mc_mask;
+};
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+ unsigned num_reg;
+ uint32_t reg_base;
+ bool free[32];
+ uint32_t reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
+
+
+/*
+ * IRQS.
+ */
+
+struct radeon_unpin_work {
+ struct work_struct work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct radeon_fence *fence;
+ struct drm_pending_vblank_event *event;
+ struct radeon_bo *old_rbo;
+ u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+ u32 disp_int;
+ u32 hdmi0_status;
+};
+
+struct r600_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 hdmi0_status;
+ u32 hdmi1_status;
+};
+
+struct evergreen_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 disp_int_cont3;
+ u32 disp_int_cont4;
+ u32 disp_int_cont5;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
+ u32 afmt_status1;
+ u32 afmt_status2;
+ u32 afmt_status3;
+ u32 afmt_status4;
+ u32 afmt_status5;
+ u32 afmt_status6;
+};
+
+union radeon_irq_stat_regs {
+ struct r500_irq_stat_regs r500;
+ struct r600_irq_stat_regs r600;
+ struct evergreen_irq_stat_regs evergreen;
+};
+
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 6
+
+struct radeon_irq {
+ bool installed;
+ spinlock_t lock;
+ atomic_t ring_int[RADEON_NUM_RINGS];
+ bool crtc_vblank_int[RADEON_MAX_CRTCS];
+ atomic_t pflip[RADEON_MAX_CRTCS];
+ wait_queue_head_t vblank_queue;
+ bool hpd[RADEON_MAX_HPD_PINS];
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
+ union radeon_irq_stat_regs stat_regs;
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+
+/*
+ * CP & rings.
+ */
+
+struct radeon_ib {
+ struct radeon_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ int ring;
+ struct radeon_fence *fence;
+ struct radeon_vm *vm;
+ bool is_const_ib;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_semaphore *semaphore;
+};
+
+struct radeon_ring {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned rptr_offs;
+ unsigned rptr_reg;
+ unsigned rptr_save_reg;
+ u64 next_rptr_gpu_addr;
+ volatile u32 *next_rptr_cpu_addr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned wptr_reg;
+ unsigned ring_size;
+ unsigned ring_free_dw;
+ int count_dw;
+ unsigned long last_activity;
+ unsigned last_rptr;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ bool ready;
+ u32 ptr_reg_shift;
+ u32 ptr_reg_mask;
+ u32 nop;
+ u32 idx;
+ u64 last_semaphore_signal_addr;
+ u64 last_semaphore_wait_addr;
+};
+
+/*
+ * VM
+ */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM 16
+
+/* defines the number of bits in the page table versus the page directory;
+ * a page is 4KB so we have 12 bits of offset, 9 bits in the page
+ * table and the remaining 19 bits in the page directory */
+#define RADEON_VM_BLOCK_SIZE 9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
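+
+/* As a worked example of the split described above (an illustrative
+ * sketch, not driver code): with 4KB pages and RADEON_VM_BLOCK_SIZE = 9,
+ * a 40-bit GPU virtual address decomposes as
+ *
+ *	offset   = addr & 0xfff;		(12 bits within the page)
+ *	pt_index = (addr >> 12) & 0x1ff;	(9 bits into one page table)
+ *	pd_index = addr >> 21;			(remaining bits into the directory)
+ *
+ * so each page table maps RADEON_VM_PTE_COUNT * 4KB = 2MB of address space.
+ */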
+
+struct radeon_vm {
+ struct list_head list;
+ struct list_head va;
+ unsigned id;
+
+ /* contains the page directory */
+ struct radeon_sa_bo *page_directory;
+ uint64_t pd_gpu_addr;
+
+ /* array of page tables, one for each page directory entry */
+ struct radeon_sa_bo **page_tables;
+
+ struct mutex mutex;
+ /* last fence for cs using this vm */
+ struct radeon_fence *fence;
+ /* last flush or NULL if we still need to flush */
+ struct radeon_fence *last_flush;
+};
+
+struct radeon_vm_manager {
+ struct mutex lock;
+ struct list_head lru_vm;
+ struct radeon_fence *active[RADEON_NUM_VM];
+ struct radeon_sa_manager sa_manager;
+ uint32_t max_pfn;
+ /* number of VMIDs */
+ unsigned nvm;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+ struct radeon_vm vm;
+};
+
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned ring_size;
+ uint64_t gpu_addr;
+ uint32_t ptr_mask;
+ atomic_t lock;
+ bool enabled;
+};
+
+struct r600_blit_cp_primitives {
+ void (*set_render_target)(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr);
+ void (*cp_set_surface_sync)(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr);
+ void (*set_shaders)(struct radeon_device *rdev);
+ void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
+ void (*set_tex_resource)(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr, u32 size);
+ void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2);
+ void (*draw_auto)(struct radeon_device *rdev);
+ void (*set_default_state)(struct radeon_device *rdev);
+};
+
+struct r600_blit {
+ struct radeon_bo *shader_obj;
+ struct r600_blit_cp_primitives primitives;
+ int max_dim;
+ int ring_size_common;
+ int ring_size_per_loop;
+ u64 shader_gpu_addr;
+ u32 vs_offset, ps_offset;
+ u32 state_offset;
+ u32 state_len;
+};
+
+/*
+ * SI RLC stuff
+ */
+struct si_rlc {
+ /* for power gating */
+ struct radeon_bo *save_restore_obj;
+ uint64_t save_restore_gpu_addr;
+ /* for clear state */
+ struct radeon_bo *clear_state_obj;
+ uint64_t clear_state_gpu_addr;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
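+
+/* Typical command submission through this API, as a sketch (the packet
+ * encoding is ASIC specific and error handling is trimmed):
+ *
+ *	r = radeon_ring_lock(rdev, ring, ndw);
+ *	if (r)
+ *		return r;
+ *	radeon_ring_write(ring, PACKET0(reg, 0));
+ *	radeon_ring_write(ring, value);
+ *	radeon_ring_unlock_commit(rdev, ring);
+ *
+ * radeon_ring_lock() takes the ring mutex and waits for ndw dwords of
+ * free space; radeon_ring_unlock_commit() bumps the write pointer and
+ * releases the mutex.
+ */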
+
+
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
+/*
+ * CS.
+ */
+struct radeon_cs_reloc {
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct radeon_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ int kpage_idx[2];
+ uint32_t *kpage[2];
+ uint32_t *kdata;
+ void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
+};
+
+struct radeon_cs_parser {
+ struct device *dev;
+ struct radeon_device *rdev;
+ struct drm_file *filp;
+ /* chunks */
+ unsigned nchunks;
+ struct radeon_cs_chunk *chunks;
+ uint64_t *chunks_array;
+ /* IB */
+ unsigned idx;
+ /* relocations */
+ unsigned nrelocs;
+ struct radeon_cs_reloc *relocs;
+ struct radeon_cs_reloc **relocs_ptr;
+ struct list_head validated;
+ unsigned dma_reloc_idx;
+ /* indices of various chunks */
+ int chunk_ib_idx;
+ int chunk_relocs_idx;
+ int chunk_flags_idx;
+ int chunk_const_ib_idx;
+ struct radeon_ib ib;
+ struct radeon_ib const_ib;
+ void *track;
+ unsigned family;
+ int parser_error;
+ u32 cs_flags;
+ u32 ring;
+ s32 priority;
+};
+
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+
+struct radeon_cs_packet {
+ unsigned idx;
+ unsigned type;
+ unsigned reg;
+ unsigned opcode;
+ int count;
+ unsigned one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+ struct radeon_bo *wb_obj;
+ volatile uint32_t *wb;
+ uint64_t gpu_addr;
+ bool enabled;
+ bool use_event;
+};
+
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET 1792
+#define R600_WB_IH_WPTR_OFFSET 2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
+#define R600_WB_UVD_RPTR_OFFSET 2560
+#define R600_WB_EVENT_OFFSET 3072
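+
+/* These are byte offsets into the single writeback buffer object.
+ * Consumers index the CPU mapping as 32-bit words; e.g. the read
+ * pointer fetch used by the rings looks roughly like (a sketch):
+ *
+ *	u32 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ *
+ * which avoids an MMIO read whenever rdev->wb.enabled is set.
+ */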
+
+/**
+ * struct radeon_pm - power management data
+ * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
+ * @igp_sideport_mclk: sideport memory clock in MHz (rs690,rs740,rs780,rs880)
+ * @igp_system_mclk: system clock in MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_clk: HT link clock in MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_width: HT link width in bits (rs690,rs740,rs780,rs880)
+ * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
+ * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
+ * @ht_bandwidth: HT bandwidth the gpu has (MByte/s) (IGP)
+ * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
+ * @sclk: GPU engine clock in MHz (core bandwidth depends on this clock)
+ * @needed_bandwidth: current bandwidth needs
+ *
+ * Keeps track of the various data needed to make power management
+ * decisions. The bandwidth needs are used to determine the minimum GPU
+ * and memory clocks. The relation between gpu/memory clock and available
+ * bandwidth is hw dependent (type of memory, bus size, efficiency, ...).
+ */
+
+enum radeon_pm_method {
+ PM_METHOD_PROFILE,
+ PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+ DYNPM_STATE_DISABLED,
+ DYNPM_STATE_MINIMUM,
+ DYNPM_STATE_PAUSED,
+ DYNPM_STATE_ACTIVE,
+ DYNPM_STATE_SUSPENDED,
+};
+enum radeon_dynpm_action {
+ DYNPM_ACTION_NONE,
+ DYNPM_ACTION_MINIMUM,
+ DYNPM_ACTION_DOWNCLOCK,
+ DYNPM_ACTION_UPCLOCK,
+ DYNPM_ACTION_DEFAULT
+};
+
+enum radeon_voltage_type {
+ VOLTAGE_NONE = 0,
+ VOLTAGE_GPIO,
+ VOLTAGE_VDDC,
+ VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+ POWER_STATE_TYPE_DEFAULT,
+ POWER_STATE_TYPE_POWERSAVE,
+ POWER_STATE_TYPE_BATTERY,
+ POWER_STATE_TYPE_BALANCED,
+ POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_profile_type {
+ PM_PROFILE_DEFAULT,
+ PM_PROFILE_AUTO,
+ PM_PROFILE_LOW,
+ PM_PROFILE_MID,
+ PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX 1
+#define PM_PROFILE_MID_SH_IDX 2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX 4
+#define PM_PROFILE_MID_MH_IDX 5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX 7
+
+struct radeon_pm_profile {
+ int dpms_off_ps_idx;
+ int dpms_on_ps_idx;
+ int dpms_off_cm_idx;
+ int dpms_on_cm_idx;
+};
+
+enum radeon_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
+ THERMAL_TYPE_SI,
+};
+
+struct radeon_voltage {
+ enum radeon_voltage_type type;
+ /* gpio voltage */
+ struct radeon_gpio_rec gpio;
+ u32 delay; /* delay in usec from voltage drop to sclk change */
+ bool active_high; /* voltage drop is active when bit is high */
+ /* VDDC voltage */
+ u8 vddc_id; /* index into vddc voltage table */
+ u8 vddci_id; /* index into vddci voltage table */
+ bool vddci_enabled;
+ /* r6xx+ sw */
+ u16 voltage;
+ /* evergreen+ vddci */
+ u16 vddci;
+};
+
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
+
+struct radeon_pm_clock_info {
+ /* memory clock */
+ u32 mclk;
+ /* engine clock */
+ u32 sclk;
+ /* voltage info */
+ struct radeon_voltage voltage;
+ /* standardized clock flags */
+ u32 flags;
+};
+
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
+struct radeon_power_state {
+ enum radeon_pm_state_type type;
+ struct radeon_pm_clock_info *clock_info;
+ /* number of valid clock modes in this power state */
+ int num_clock_modes;
+ struct radeon_pm_clock_info *default_clock_mode;
+ /* standardized state flags */
+ u32 flags;
+ u32 misc; /* vbios specific flags */
+ u32 misc2; /* vbios specific flags */
+ int pcie_lanes; /* pcie lanes */
+};
+
+/*
+ * Some modes are overclocked by a very small amount; accept them.
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
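+
+/* The annotation above implies the margin is in 10 kHz units
+ * (500 == 5 MHz). Illustrative check, as a sketch with made-up numbers:
+ * a 164 MHz mode clock against a 160 MHz PLL limit is still accepted,
+ * since 16400 <= 16000 + RADEON_MODE_OVERCLOCK_MARGIN.
+ */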
+
+struct radeon_pm {
+ struct mutex mutex;
+ /* write locked while reprogramming mclk */
+ struct rw_semaphore mclk_lock;
+ u32 active_crtcs;
+ int active_crtc_count;
+ int req_vblank;
+ bool vblank_sync;
+ fixed20_12 max_bandwidth;
+ fixed20_12 igp_sideport_mclk;
+ fixed20_12 igp_system_mclk;
+ fixed20_12 igp_ht_link_clk;
+ fixed20_12 igp_ht_link_width;
+ fixed20_12 k8_bandwidth;
+ fixed20_12 sideport_bandwidth;
+ fixed20_12 ht_bandwidth;
+ fixed20_12 core_bandwidth;
+ fixed20_12 sclk;
+ fixed20_12 mclk;
+ fixed20_12 needed_bandwidth;
+ struct radeon_power_state *power_state;
+ /* number of valid power states */
+ int num_power_states;
+ int current_power_state_index;
+ int current_clock_mode_index;
+ int requested_power_state_index;
+ int requested_clock_mode_index;
+ int default_power_state_index;
+ u32 current_sclk;
+ u32 current_mclk;
+ u16 current_vddc;
+ u16 current_vddci;
+ u32 default_sclk;
+ u32 default_mclk;
+ u16 default_vddc;
+ u16 default_vddci;
+ struct radeon_i2c_chan *i2c_bus;
+ /* selected pm method */
+ enum radeon_pm_method pm_method;
+ /* dynpm power management */
+ struct delayed_work dynpm_idle_work;
+ enum radeon_dynpm_state dynpm_state;
+ enum radeon_dynpm_action dynpm_planned_action;
+ unsigned long dynpm_action_timeout;
+ bool dynpm_can_upclock;
+ bool dynpm_can_downclock;
+ /* profile-based power management */
+ enum radeon_pm_profile_type profile;
+ int profile_index;
+ struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+ /* internal thermal controller on rv6xx+ */
+ enum radeon_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
+};
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance);
+/*
+ * UVD
+ */
+#define RADEON_MAX_UVD_HANDLES 10
+#define RADEON_UVD_STACK_SIZE (1024*1024)
+#define RADEON_UVD_HEAP_SIZE (1024*1024)
+
+struct radeon_uvd {
+ struct radeon_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ void *saved_bo;
+ atomic_t handles[RADEON_MAX_UVD_HANDLES];
+ struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+ struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl);
+
+struct r600_audio {
+ int channels;
+ int rate;
+ int bits_per_sample;
+ u8 status_bits;
+ u8 category_code;
+};
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev, int test_number);
+
+
+/*
+ * Testing
+ */
+void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *cpA,
+ struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
+
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+ struct drm_info_list *files,
+ unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
+
+
+/*
+ * ASIC specific functions.
+ */
+struct radeon_asic {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+ int (*resume)(struct radeon_device *rdev);
+ int (*suspend)(struct radeon_device *rdev);
+ void (*vga_set_state)(struct radeon_device *rdev, bool state);
+ int (*asic_reset)(struct radeon_device *rdev);
+ /* ioctl hw specific callback. Some hw might want to perform a special
+ * operation on specific ioctls. For instance, on wait idle some hw
+ * might want to perform an HDP flush through MMIO as it seems that
+ * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
+ * through the ring.
+ */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* check if 3D engine is idle */
+ bool (*gui_idle)(struct radeon_device *rdev);
+ /* wait for mc_idle */
+ int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* get the reference clock */
+ u32 (*get_xclk)(struct radeon_device *rdev);
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+ /* gart */
+ struct {
+ void (*tlb_flush)(struct radeon_device *rdev);
+ int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+ } gart;
+ struct {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+
+ u32 pt_ring_index;
+ void (*set_page)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ } vm;
+ /* ring specific callbacks */
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+ } ring[RADEON_NUM_RINGS];
+ /* irqs */
+ struct {
+ int (*set)(struct radeon_device *rdev);
+ int (*process)(struct radeon_device *rdev);
+ } irq;
+ /* displays */
+ struct {
+ /* display watermarks */
+ void (*bandwidth_update)(struct radeon_device *rdev);
+ /* get frame count */
+ u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+ /* wait for vblank */
+ void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+ /* set backlight level */
+ void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+ /* get backlight level */
+ u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+ /* audio callbacks */
+ void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+ void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
+ } display;
+ /* copy functions for bo handling */
+ struct {
+ int (*blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+ u32 blit_ring_index;
+ int (*dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+ u32 dma_ring_index;
+ /* method used for bo copy */
+ int (*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+ /* ring used for bo copies */
+ u32 copy_ring_index;
+ } copy;
+ /* surfaces */
+ struct {
+ int (*set_reg)(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size);
+ void (*clear_reg)(struct radeon_device *rdev, int reg);
+ } surface;
+ /* hotplug detect */
+ struct {
+ void (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+ bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ } hpd;
+ /* power management */
+ struct {
+ void (*misc)(struct radeon_device *rdev);
+ void (*prepare)(struct radeon_device *rdev);
+ void (*finish)(struct radeon_device *rdev);
+ void (*init_profile)(struct radeon_device *rdev);
+ void (*get_dynpm_state)(struct radeon_device *rdev);
+ uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+ void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+ void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
+ void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+ void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+ int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
+ } pm;
+ /* pageflipping */
+ struct {
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+ } pflip;
+};
+
+/*
+ * Asic structures
+ */
+struct r100_asic {
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+};
+
+struct r300_asic {
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+};
+
+struct r600_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ unsigned tile_config;
+ unsigned backend_map;
+};
+
+struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ unsigned tile_config;
+ unsigned backend_map;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ unsigned tile_config;
+ unsigned backend_map;
+};
+
+struct cayman_asic {
+ unsigned max_shader_engines;
+ unsigned max_pipes_per_simd;
+ unsigned max_tile_pipes;
+ unsigned max_simds_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_gs_threads;
+ unsigned max_stack_entries;
+ unsigned sx_num_of_sets;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned max_hw_contexts;
+ unsigned sq_num_cf_insts;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_shader_engines;
+ unsigned num_shader_pipes_per_simd;
+ unsigned num_tile_pipes;
+ unsigned num_simds_per_se;
+ unsigned num_backends_per_se;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+
+ unsigned tile_config;
+};
+
+struct si_asic {
+ unsigned max_shader_engines;
+ unsigned max_tile_pipes;
+ unsigned max_cu_per_sh;
+ unsigned max_sh_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_gs_threads;
+ unsigned max_hw_contexts;
+ unsigned sc_prim_fifo_size_frontend;
+ unsigned sc_prim_fifo_size_backend;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_tile_pipes;
+ unsigned num_backends_per_se;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+
+ unsigned tile_config;
+ uint32_t tile_mode_array[32];
+};
+
+union radeon_asic_config {
+ struct r300_asic r300;
+ struct r100_asic r100;
+ struct r600_asic r600;
+ struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
+ struct cayman_asic cayman;
+ struct si_asic si;
+};
+
+/*
+ * ASIC initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
+ struct radeon_bo *robj;
+ volatile uint32_t *ptr;
+ u64 gpu_addr;
+};
+
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct radeon_atif_notifications {
+ bool display_switch;
+ bool expansion_mode_change;
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool display_conf_change;
+ bool px_gfx_switch;
+ bool brightness_change;
+ bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool select_active_disp;
+ bool lid_state;
+ bool get_tv_standard;
+ bool set_tv_standard;
+ bool get_panel_expansion_mode;
+ bool set_panel_expansion_mode;
+ bool temperature_change;
+ bool graphics_device_types;
+};
+
+struct radeon_atif {
+ struct radeon_atif_notifications notifications;
+ struct radeon_atif_functions functions;
+ struct radeon_atif_notification_cfg notification_cfg;
+ struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+ bool get_ext_state;
+ bool pcie_perf_req;
+ bool pcie_dev_rdy;
+ bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+ struct radeon_atcs_functions functions;
+};
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct pci_dev *pdev;
+ struct rw_semaphore exclusive_lock;
+ /* ASIC */
+ union radeon_asic_config config;
+ enum radeon_family family;
+ unsigned long flags;
+ int usec_timeout;
+ enum radeon_pll_errata pll_errata;
+ int num_gb_pipes;
+ int num_z_pipes;
+ int disp_priority;
+ /* BIOS */
+ uint8_t *bios;
+ bool is_atom_bios;
+ uint16_t bios_header_start;
+ struct radeon_bo *stollen_vga_memory;
+ /* Register mmio */
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ /* protects concurrent MM_INDEX/DATA based register access */
+ spinlock_t mmio_idx_lock;
+ void __iomem *rmmio;
+ radeon_rreg_t mc_rreg;
+ radeon_wreg_t mc_wreg;
+ radeon_rreg_t pll_rreg;
+ radeon_wreg_t pll_wreg;
+ uint32_t pcie_reg_mask;
+ radeon_rreg_t pciep_rreg;
+ radeon_wreg_t pciep_wreg;
+ /* io port */
+ void __iomem *rio_mem;
+ resource_size_t rio_mem_size;
+ struct radeon_clock clock;
+ struct radeon_mc mc;
+ struct radeon_gart gart;
+ struct radeon_mode_info mode_info;
+ struct radeon_scratch scratch;
+ struct radeon_mman mman;
+ struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
+ wait_queue_head_t fence_queue;
+ struct mutex ring_lock;
+ struct radeon_ring ring[RADEON_NUM_RINGS];
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
+ struct radeon_irq irq;
+ struct radeon_asic *asic;
+ struct radeon_gem gem;
+ struct radeon_pm pm;
+ struct radeon_uvd uvd;
+ uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
+ struct radeon_wb wb;
+ struct radeon_dummy_page dummy_page;
+ bool shutdown;
+ bool suspend;
+ bool need_dma32;
+ bool accel_working;
+ bool fastfb_working; /* IGP feature */
+ struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
+ const struct firmware *me_fw; /* all family ME firmware */
+ const struct firmware *pfp_fw; /* r6/700 PFP firmware */
+ const struct firmware *rlc_fw; /* r6/700 RLC firmware */
+ const struct firmware *mc_fw; /* NI MC firmware */
+ const struct firmware *ce_fw; /* SI CE firmware */
+ const struct firmware *uvd_fw; /* UVD firmware */
+ struct r600_blit r600_blit;
+ struct r600_vram_scratch vram_scratch;
+ int msi_enabled; /* msi enabled */
+ struct r600_ih ih; /* r6/700 interrupt ring */
+ struct si_rlc rlc;
+ struct work_struct hotplug_work;
+ struct work_struct audio_work;
+ int num_crtc; /* number of crtcs */
+ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+ bool audio_enabled;
+ bool has_uvd;
+ struct r600_audio audio_status; /* audio stuff */
+ struct notifier_block acpi_nb;
+ /* only one userspace can use Hyperz features or CMASK at a time */
+ struct drm_file *hyperz_filp;
+ struct drm_file *cmask_filp;
+ /* i2c buses */
+ struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+ /* debugfs */
+ struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+ /* virtual memory */
+ struct radeon_vm_manager vm_manager;
+ struct mutex gpu_clock_mutex;
+ /* ACPI interface */
+ struct radeon_atif atif;
+ struct radeon_atcs atcs;
+};
+
+int radeon_device_init(struct radeon_device *rdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define WREG32_P(reg, val, mask) \
+ do { \
+ uint32_t tmp_ = RREG32(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define WREG32_PLL_P(reg, val, mask) \
+ do { \
+ uint32_t tmp_ = RREG32_PLL(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32_PLL(reg, tmp_); \
+ } while (0)
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
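+
+/* WREG32_P() is a read-modify-write helper: bits set in 'mask' are
+ * preserved from the current register value and the remaining bits are
+ * taken from 'val'. For example (a sketch with a made-up register and
+ * field):
+ *
+ *	WREG32_P(SOME_CNTL, SOME_FIELD_ENABLE, ~SOME_FIELD_MASK);
+ *
+ * keeps everything outside SOME_FIELD_MASK intact while programming the
+ * field; WREG32_AND()/WREG32_OR() are the common special cases.
+ */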
+
+/*
+ * Indirect registers accessor
+ */
+static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ r = RREG32(RADEON_PCIE_DATA);
+ return r;
+}
+
+static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ WREG32(RADEON_PCIE_DATA, (v));
+}
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASICs helpers.
+ */
+#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
+ (rdev->pdev->device == 0x5969))
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+ (rdev->family == CHIP_RV200) || \
+ (rdev->family == CHIP_RS100) || \
+ (rdev->family == CHIP_RS200) || \
+ (rdev->family == CHIP_RV250) || \
+ (rdev->family == CHIP_RV280) || \
+ (rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
+ (rdev->family == CHIP_RV350) || \
+ (rdev->family == CHIP_R350) || \
+ (rdev->family == CHIP_RV380) || \
+ (rdev->family == CHIP_R420) || \
+ (rdev->family == CHIP_R423) || \
+ (rdev->family == CHIP_RV410) || \
+ (rdev->family == CHIP_RS400) || \
+ (rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+ (rdev->ddev->pdev->device == 0x9443) || \
+ (rdev->ddev->pdev->device == 0x944B) || \
+ (rdev->ddev->pdev->device == 0x9506) || \
+ (rdev->ddev->pdev->device == 0x9509) || \
+ (rdev->ddev->pdev->device == 0x950F) || \
+ (rdev->ddev->pdev->device == 0x689C) || \
+ (rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
+ (rdev->family == CHIP_RS690) || \
+ (rdev->family == CHIP_RS740) || \
+ (rdev->family >= CHIP_R600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+ (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
+#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
+#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
+ (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
+
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
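+
+/* These read little-endian values out of the cached BIOS image. As an
+ * illustrative sketch of their use, the legacy BIOS header is located
+ * through a 16-bit pointer stored at offset 0x48 of the ROM:
+ *
+ *	rdev->bios_header_start = RBIOS16(0x48);
+ */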
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+#if DRM_DEBUG_CODE == 0
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+#else
+/* With debugging this is just too big to inline */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+#endif
+
+/*
+ * ASICs macro.
+ */
+#define radeon_init(rdev) (rdev)->asic->init((rdev))
+#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
+#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
+#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
+#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
+#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
+#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
+#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
+#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
+#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+
+/* Common functions */
+/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
+extern void radeon_agp_disable(struct radeon_device *rdev);
+extern int radeon_modeset_init(struct radeon_device *rdev);
+extern void radeon_modeset_fini(struct radeon_device *rdev);
+extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
+extern void radeon_surface_init(struct radeon_device *rdev);
+extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size);
+
+/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring);
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t offset,
+ uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
+
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
+
+/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
+ * r600 cs checking helper
+ */
+unsigned r600_mip_minify(unsigned size, unsigned level);
+bool r600_fmt_is_valid_color(u32 format);
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
+int r600_fmt_get_blocksize(u32 format);
+int r600_fmt_get_nblocksx(u32 format, u32 w);
+int r600_fmt_get_nblocksy(u32 format, u32 h);
+
+/*
+ * r600 functions used by radeon_encoder.c
+ */
+struct radeon_hdmi_acr {
+ u32 clock;
+
+ int n_32khz;
+ int cts_32khz;
+
+ int n_44_1khz;
+ int cts_44_1khz;
+
+ int n_48khz;
+ int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 enabled_rb_mask);
+
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
+
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status);
+
+#include "radeon_object.h"
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 0000000..196d28d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
+
+#include <linux/vga_switcheroo.h>
+
+#define ACPI_AC_CLASS "ac_adapter"
+
+extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+struct atif_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 notification_mask; /* supported notifications mask */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_mask; /* valid flags mask */
+ u32 flags; /* flags */
+ u8 command_code; /* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 pending; /* pending sbios requests */
+ u8 panel_exp_mode; /* panel expansion mode */
+ u8 thermal_gfx; /* thermal state: target gfx controller */
+ u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
+ u8 forced_power_gfx; /* forced power state: target gfx controller */
+ u8 forced_power_state; /* forced power state: state id */
+ u8 system_power_src; /* system power source */
+ u8 backlight_level; /* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK 0x3
+#define ATIF_NOTIFY_NONE 0
+#define ATIF_NOTIFY_81 1
+#define ATIF_NOTIFY_N 2
+
+struct atcs_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
+/* Call the ATIF method
+ */
+/**
+ * radeon_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
+{
+ acpi_status status;
+ union acpi_object atif_arg_elements[2];
+ struct acpi_object_list atif_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atif_arg.count = 2;
+ atif_arg.pointer = &atif_arg_elements[0];
+
+ atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atif_arg_elements[1].buffer.length = params->length;
+ atif_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+ /* Fail only if calling the method fails and ATIF is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+ n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+ n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+ n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+ n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+ n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+ n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+ n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+ n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+ n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+ f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+ f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+ f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+ f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+ f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+ f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+ f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+ f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+ f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+ f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(acpi_handle handle,
+ struct radeon_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 12) {
+ DRM_INFO("ATIF buffer is too small: %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+ DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+ radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+ radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(acpi_handle handle,
+ struct radeon_atif_notification_cfg *n)
+{
+ union acpi_object *info;
+ struct atif_system_params params;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&params, 0, sizeof(params));
+ size = min(sizeof(params), size);
+ memcpy(&params, info->buffer.pointer, size);
+
+ DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+ params.flags, params.valid_mask);
+ params.flags = params.flags & params.valid_mask;
+
+ if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+ n->enabled = false;
+ n->command_code = 0;
+ } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+ n->enabled = true;
+ n->command_code = 0x81;
+ } else {
+ if (size < 11) {
+ err = -EINVAL;
+ goto out;
+ }
+ n->enabled = true;
+ n->command_code = params.command_code;
+ }
+
+out:
+ DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+ (n->enabled ? "enabled" : "disabled"),
+ n->command_code);
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_sbios_requests(acpi_handle handle,
+ struct atif_sbios_requests *req)
+{
+ union acpi_object *info;
+ size_t size;
+ int count = 0;
+
+ info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ if (!info)
+ return -EIO;
+
+ size = *(u16 *)info->buffer.pointer;
+ if (size < 0xd) {
+ count = -EINVAL;
+ goto out;
+ }
+ memset(req, 0, sizeof(*req));
+
+ size = min(sizeof(*req), size);
+ memcpy(req, info->buffer.pointer, size);
+ DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
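+ /* one pending sbios request per set bit in the mask */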
+ count = hweight32(req->pending);
+
+out:
+ kfree(info);
+ return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @event: acpi bus event pointer
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int radeon_atif_handler(struct radeon_device *rdev,
+ struct acpi_bus_event *event)
+{
+ struct radeon_atif *atif = &rdev->atif;
+ struct atif_sbios_requests req;
+ acpi_handle handle;
+ int count;
+
+ DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+ event->device_class, event->type);
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ if (!atif->notification_cfg.enabled ||
+ event->type != atif->notification_cfg.command_code)
+ /* Not our event */
+ return NOTIFY_DONE;
+
+ /* Check pending SBIOS requests */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ count = radeon_atif_get_sbios_requests(handle, &req);
+
+ if (count <= 0)
+ return NOTIFY_DONE;
+
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+ if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+ struct radeon_encoder *enc = atif->encoder_for_bl;
+
+ if (enc) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+
+ radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
+ } else {
+ struct radeon_encoder_lvds *dig = enc->enc_priv;
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
+ }
+#endif
+ }
+ }
+ /* TODO: check other events */
+
+ /* We've handled the event; stop the notifier chain. The ACPI interface
+ * overloads ACPI_VIDEO_NOTIFY_PROBE; we don't want to send that to
+ * userspace if the event was generated only to signal a SBIOS
+ * request.
+ */
+ return NOTIFY_BAD;
+}
+
+/* Call the ATCS method
+ */
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
+{
+ acpi_status status;
+ union acpi_object atcs_arg_elements[2];
+ struct acpi_object_list atcs_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atcs_arg.count = 2;
+ atcs_arg.pointer = &atcs_arg_elements[0];
+
+ atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atcs_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atcs_arg_elements[1].buffer.length = params->length;
+ atcs_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atcs_arg_elements[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+ /* Fail only if calling the method fails and ATCS is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+ acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+ f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+ f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+ f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+ f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(acpi_handle handle,
+ struct radeon_atcs *atcs)
+{
+ union acpi_object *info;
+ struct atcs_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 8) {
+ DRM_INFO("ATCS buffer is too small: %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+ DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+ radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: notifier event value (unused)
+ * @data: acpi event
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG_DRIVER("pm: AC\n");
+ else
+ DRM_DEBUG_DRIVER("pm: DC\n");
+
+ radeon_pm_acpi_event_handler(rdev);
+ }
+
+ /* Check for pending SBIOS requests */
+ return radeon_atif_handler(rdev, entry);
+}
+
+/* Call all ACPI methods here */
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+ acpi_handle handle;
+ struct radeon_atif *atif = &rdev->atif;
+ struct radeon_atcs *atcs = &rdev->atcs;
+ int ret;
+
+ /* Get the device handle */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
+ /* Call the ATCS method */
+ ret = radeon_atcs_verify_interface(handle, atcs);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+ }
+
+ /* Call the ATIF method */
+ ret = radeon_atif_verify_interface(handle, atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ goto out;
+ }
+
+ if (atif->notifications.brightness_change) {
+ struct drm_encoder *tmp;
+ struct radeon_encoder *target = NULL;
+
+ /* Find the encoder controlling the brightness */
+ list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+ head) {
+ struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+ if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ enc->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+ if (dig->bl_dev) {
+ target = enc;
+ break;
+ }
+ } else {
+ struct radeon_encoder_lvds *dig = enc->enc_priv;
+ if (dig->bl_dev) {
+ target = enc;
+ break;
+ }
+ }
+ }
+ }
+
+ atif->encoder_for_bl = target;
+ if (!target) {
+ /* Brightness change notification is enabled, but we
+ * didn't find a backlight controller; this should
+ * never happen.
+ */
+ DRM_ERROR("Cannot find a backlight controller\n");
+ }
+ }
+
+ if (atif->functions.sbios_requests && !atif->functions.system_params) {
+ /* XXX check this workaround: if the sbios request function is
+ * present we have to see how it's configured in the system
+ * params
+ */
+ atif->functions.system_params = true;
+ }
+
+ if (atif->functions.system_params) {
+ ret = radeon_atif_get_notification_params(handle,
+ &atif->notification_cfg);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+ ret);
+ /* Disable notification */
+ atif->notification_cfg.enabled = false;
+ }
+ }
+
+out:
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+
+ return ret;
+}
+
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+ unregister_acpi_notifier(&rdev->acpi_nb);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 0000000..be4af76
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+struct acpi_bus_event;
+
+int radeon_atif_handler(struct radeon_device *rdev,
+ struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
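+/* Illustrative sketch only (not part of the interface): the driver
+ * evaluates these methods with acpi_evaluate_object(), roughly as
+ * radeon_atif_call()/radeon_atcs_call() do in radeon_acpi.c:
+ *
+ *	union acpi_object argv[2];
+ *	struct acpi_object_list args = { 2, argv };
+ *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ *	argv[0].type = ACPI_TYPE_INTEGER;
+ *	argv[0].integer.value = function;
+ *	argv[1].type = ACPI_TYPE_BUFFER;	(or a dummy integer when unused)
+ *	argv[1].buffer.length = params->length;
+ *	argv[1].buffer.pointer = params->pointer;
+ *	status = acpi_evaluate_object(handle, "ATIF", &args, &out);
+ */
+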
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
+# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
+# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
+# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
+# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
+# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
+# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
+# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
+# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
+/* supported functions vector */
+# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
+# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
+# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
+# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
+# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
+# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
+# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
+# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
+# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
+# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE - panel expansion mode
+ * BYTE - thermal state: target gfx controller
+ * BYTE - thermal state: state id (0: exit state, non-0: state)
+ * BYTE - forced power state: target gfx controller
+ * BYTE - forced power state: state id
+ * BYTE - system power source
+ * BYTE - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
+# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
+# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
+# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
+# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
+# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
+# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
+# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
+# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
+/* panel expansion mode */
+# define ATIF_PANEL_EXPANSION_DISABLE 0
+# define ATIF_PANEL_EXPANSION_FULL 1
+# define ATIF_PANEL_EXPANSION_ASPECT 2
+/* target gfx controller */
+# define ATIF_TARGET_GFX_SINGLE 0
+# define ATIF_TARGET_GFX_PX_IGPU 1
+# define ATIF_TARGET_GFX_PX_DGPU 2
+/* system power source */
+# define ATIF_POWER_SOURCE_AC 1
+# define ATIF_POWER_SOURCE_DC 2
+# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
+# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - selected displays
+ * WORD - connected displays
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - selected displays
+ */
+# define ATIF_LCD1 (1 << 0)
+# define ATIF_CRT1 (1 << 1)
+# define ATIF_TV (1 << 2)
+# define ATIF_DFP1 (1 << 3)
+# define ATIF_CRT2 (1 << 4)
+# define ATIF_LCD2 (1 << 5)
+# define ATIF_DFP2 (1 << 7)
+# define ATIF_CV (1 << 8)
+# define ATIF_DFP3 (1 << 9)
+# define ATIF_DFP4 (1 << 10)
+# define ATIF_DFP5 (1 << 11)
+# define ATIF_DFP6 (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE 0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume; for general lid
+ * status, use the kernel-provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - 0
+ * BYTE - TV standard
+ */
+# define ATIF_TV_STD_NTSC 0
+# define ATIF_TV_STD_PAL 1
+# define ATIF_TV_STD_PALM 2
+# define ATIF_TV_STD_PAL60 3
+# define ATIF_TV_STD_NTSCJ 4
+# define ATIF_TV_STD_PALCN 5
+# define ATIF_TV_STD_PALN 6
+# define ATIF_TV_STD_SCART_RGB 9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - 0
+ * BYTE - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - gfx controller id
+ * BYTE - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of gfx devices
+ * WORD - device structure size in bytes (excludes device size field)
+ * DWORD - flags \
+ * WORD - bus number } repeated structure
+ * WORD - device number /
+ */
+/* flags */
+# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
+# define ATIF_XGP_PORT (1 << 1)
+# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
+# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
+# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
+# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
+# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
+# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
+# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
+# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
+# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
+# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
+# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
+# define ATPX_TV_SIGNAL_MUXED (1 << 4)
+# define ATPX_DFP_SIGNAL_MUXED (1 << 5)
+# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
+# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
+# define ATPX_ACF_NOT_SUPPORTED (1 << 8)
+# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
+# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
+# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL 0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+# define ATPX_INTEGRATED_GPU 0
+# define ATPX_DISCRETE_GPU 1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of display connectors
+ * WORD - connector structure size in bytes (excludes connector size field)
+ * BYTE - flags \
+ * BYTE - ATIF display vector bit position } repeated
+ * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD - connector ACPI id /
+ */
+/* flags */
+# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
+# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
+# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD - number of HPD/DDC ports
+ * WORD - port structure size in bytes (excludes port size field)
+ * BYTE - ATIF display vector bit position \
+ * BYTE - hpd id } repeated structure
+ * BYTE - ddc id /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+# define ATPX_HPD_NONE 0
+# define ATPX_HPD1 1
+# define ATPX_HPD2 2
+# define ATPX_HPD3 3
+# define ATPX_HPD4 4
+# define ATPX_HPD5 5
+# define ATPX_HPD6 6
+/* ddc id */
+# define ATPX_DDC_NONE 0
+# define ATPX_DDC1 1
+# define ATPX_DDC2 2
+# define ATPX_DDC3 3
+# define ATPX_DDC4 4
+# define ATPX_DDC5 5
+# define ATPX_DDC6 6
+# define ATPX_DDC7 7
+# define ATPX_DDC8 8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
+# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
+# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
+# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+# define ATCS_DOCKED (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD - valid flags mask
+ * WORD - flags
+ * BYTE - request type
+ * BYTE - performance request
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - return value
+ */
+/* flags */
+# define ATCS_ADVERTISE_CAPS (1 << 0)
+# define ATCS_WAIT_FOR_COMPLETION (1 << 1)
+/* request type */
+# define ATCS_PCIE_LINK_SPEED 1
+/* performance request */
+# define ATCS_REMOVE 0
+# define ATCS_FORCE_LOW_POWER 1
+# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
+# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
+# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
+/* return value */
+# define ATCS_REQUEST_REFUSED 1
+# define ATCS_REQUEST_COMPLETE 2
+# define ATCS_REQUEST_IN_PROGRESS 3
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE - number of active lanes
+ * OUTPUT:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - number of active lanes
+ */
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 0000000..4243334
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+#if __OS_HAS_AGP
+
+struct radeon_agpmode_quirk {
+ u32 hostbridge_vendor;
+ u32 hostbridge_device;
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 default_mode;
+};
+
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+ /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+ { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+ /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+ /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+ 0x148c, 0x2073, 4},
+ /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_IBM, 0x052f, 1},
+ /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+ PCI_VENDOR_ID_IBM, 0x0550, 1},
+ /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+ PCI_VENDOR_ID_IBM, 0x0530, 1},
+ /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+ PCI_VENDOR_ID_IBM, 0x054f, 2},
+ /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x816b, 2},
+ /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x8195, 8},
+ /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_DELL, 0x00e3, 2},
+ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_DELL, 0x0149, 1},
+ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x0531, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0061, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0064, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x10cf, 0x127f, 1},
+ /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+ { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x1787, 0x5960, 4},
+ /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+ { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x17af, 0x2020, 4},
+ /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+ { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+ PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+ /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+ { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+ PCI_VENDOR_ID_ATI, 0x013a, 2},
+ /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+ PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+ /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+ PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+ /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+ 0x174b, 0x7149, 4},
+ /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+ 0x1462, 0x0380, 4},
+ /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+ 0x148c, 0x2073, 4},
+ /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+ { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+ PCI_VENDOR_ID_SONY, 0x8175, 1},
+ /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+ { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+ PCI_VENDOR_ID_ATI, 0x0152, 2},
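+ /* terminator - the quirk walk in radeon_agp_init() stops at chip_device == 0 */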
+ { 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+ struct drm_agp_mode mode;
+ struct drm_agp_info info;
+ uint32_t agp_status;
+ int default_mode;
+ bool is_v3;
+ int ret;
+
+ /* Acquire AGP. */
+ ret = drm_agp_acquire(rdev->ddev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_agp_info(rdev->ddev, &info);
+ if (ret) {
+ drm_agp_release(rdev->ddev);
+ DRM_ERROR("Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
+
+ if (rdev->ddev->agp->agp_info.aper_size < 32) {
+ drm_agp_release(rdev->ddev);
+ dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+ "need at least 32M, disabling AGP\n",
+ rdev->ddev->agp->agp_info.aper_size);
+ return -EINVAL;
+ }
+
+ mode.mode = info.mode;
+ /* Chips with the AGP-to-PCIE bridge don't have the AGP_STATUS register.
+ * Just use whatever mode the host sets up.
+ */
+ if (rdev->family <= CHIP_RV350)
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ else
+ agp_status = mode.mode;
+ is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+ if (is_v3) {
+ default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+ } else {
+ if (agp_status & RADEON_AGP_4X_MODE) {
+ default_mode = 4;
+ } else if (agp_status & RADEON_AGP_2X_MODE) {
+ default_mode = 2;
+ } else {
+ default_mode = 1;
+ }
+ }
+
+ /* Apply AGPMode Quirks */
+ while (p && p->chip_device != 0) {
+ if (info.id_vendor == p->hostbridge_vendor &&
+ info.id_device == p->hostbridge_device &&
+ rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ default_mode = p->default_mode;
+ }
+ ++p;
+ }
+
+ if (radeon_agpmode > 0) {
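+ /* a user-requested mode must be a power of two within the range
+ * the slot supports: 4/8 for AGPv3, 1/2/4 otherwise
+ */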
+ if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+ (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+ (radeon_agpmode & (radeon_agpmode - 1))) {
+ DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+ radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+ default_mode);
+ radeon_agpmode = default_mode;
+ } else {
+ DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+ }
+ } else {
+ radeon_agpmode = default_mode;
+ }
+
+ mode.mode &= ~RADEON_AGP_MODE_MASK;
+ if (is_v3) {
+ switch (radeon_agpmode) {
+ case 8:
+ mode.mode |= RADEON_AGPv3_8X_MODE;
+ break;
+ case 4:
+ default:
+ mode.mode |= RADEON_AGPv3_4X_MODE;
+ break;
+ }
+ } else {
+ switch (radeon_agpmode) {
+ case 4:
+ mode.mode |= RADEON_AGP_4X_MODE;
+ break;
+ case 2:
+ mode.mode |= RADEON_AGP_2X_MODE;
+ break;
+ case 1:
+ default:
+ mode.mode |= RADEON_AGP_1X_MODE;
+ break;
+ }
+ }
+
+ mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
+ ret = drm_agp_enable(rdev->ddev, mode);
+ if (ret) {
+ DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+ drm_agp_release(rdev->ddev);
+ return ret;
+ }
+
+ rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
+ rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+ rdev->mc.gtt_start = rdev->mc.agp_base;
+ rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
+
+ /* workaround some hw issues */
+ if (rdev->family < CHIP_R200) {
+ WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+ }
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ int r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+ }
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+ drm_agp_release(rdev->ddev);
+ }
+#endif
+}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 0000000..de36c47
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,2097 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function. Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ BUG_ON(1);
+ return 0;
+}
+
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function. Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+ reg, v);
+ BUG_ON(1);
+}
+
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures. Not all asics have all apertures (all asics).
+ */
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+ rdev->mc_rreg = &radeon_invalid_rreg;
+ rdev->mc_wreg = &radeon_invalid_wreg;
+ rdev->pll_rreg = &radeon_invalid_rreg;
+ rdev->pll_wreg = &radeon_invalid_wreg;
+ rdev->pciep_rreg = &radeon_invalid_rreg;
+ rdev->pciep_wreg = &radeon_invalid_wreg;
+
+ /* Don't change order as we are overriding accessors. */
+ if (rdev->family < CHIP_RV515) {
+ rdev->pcie_reg_mask = 0xff;
+ } else {
+ rdev->pcie_reg_mask = 0x7ff;
+ }
+ /* FIXME: not sure here */
+ if (rdev->family <= CHIP_R580) {
+ rdev->pll_rreg = &r100_pll_rreg;
+ rdev->pll_wreg = &r100_pll_wreg;
+ }
+ if (rdev->family >= CHIP_R420) {
+ rdev->mc_rreg = &r420_mc_rreg;
+ rdev->mc_wreg = &r420_mc_wreg;
+ }
+ if (rdev->family >= CHIP_RV515) {
+ rdev->mc_rreg = &rv515_mc_rreg;
+ rdev->mc_wreg = &rv515_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+ rdev->mc_rreg = &rs400_mc_rreg;
+ rdev->mc_wreg = &rs400_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ rdev->mc_rreg = &rs690_mc_rreg;
+ rdev->mc_wreg = &rs690_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS600) {
+ rdev->mc_rreg = &rs600_mc_rreg;
+ rdev->mc_wreg = &rs600_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ rdev->mc_rreg = &rs780_mc_rreg;
+ rdev->mc_wreg = &rs780_mc_wreg;
+ }
+ if (rdev->family >= CHIP_R600) {
+ rdev->pciep_rreg = &r600_pciep_rreg;
+ rdev->pciep_wreg = &r600_pciep_wreg;
+ }
+}
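+
+/* A minimal usage sketch, assuming the RREG32_MC()/WREG32_MC() style
+ * wrappers from radeon.h: a read goes through the callback chosen above,
+ * e.g. val = rdev->mc_rreg(rdev, reg), so callers never have to test the
+ * asic family themselves.
+ */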
+
+
+/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+ rdev->flags &= ~RADEON_IS_AGP;
+ if (rdev->family >= CHIP_R600) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ } else if (rdev->family >= CHIP_RV515 ||
+ rdev->family == CHIP_RV380 ||
+ rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+ rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r100_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = NULL,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic r200_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r100_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r100_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic r300_asic = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r300_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &r100_pci_gart_tlb_flush,
+ .set_page = &r100_pci_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic r300_asic_pcie = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r300_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic r420_asic = {
+ .init = &r420_init,
+ .fini = &r420_fini,
+ .suspend = &r420_suspend,
+ .resume = &r420_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r300_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r300_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic rs400_asic = {
+ .init = &rs400_init,
+ .fini = &rs400_fini,
+ .suspend = &rs400_suspend,
+ .resume = &rs400_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &r300_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs400_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs400_gart_tlb_flush,
+ .set_page = &rs400_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r100_irq_set,
+ .process = &r100_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &r100_bandwidth_update,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .wait_for_vblank = &r100_wait_for_vblank,
+ .set_backlight_level = &radeon_legacy_set_backlight_level,
+ .get_backlight_level = &radeon_legacy_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r100_hpd_init,
+ .fini = &r100_hpd_fini,
+ .sense = &r100_hpd_sense,
+ .set_polarity = &r100_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r100_pm_misc,
+ .prepare = &r100_pm_prepare,
+ .finish = &r100_pm_finish,
+ .init_profile = &r100_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
+ },
+};
+
+static struct radeon_asic rs600_asic = {
+ .init = &rs600_init,
+ .fini = &rs600_fini,
+ .suspend = &rs600_suspend,
+ .resume = &rs600_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &rs600_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs600_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs600_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rs600_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic rs690_asic = {
+ .init = &rs690_init,
+ .fini = &rs690_fini,
+ .suspend = &rs690_suspend,
+ .resume = &rs690_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &rs600_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rs690_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rs400_gart_tlb_flush,
+ .set_page = &rs400_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r200_copy_dma,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic rv515_asic = {
+ .init = &rv515_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &rv515_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &rs600_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &rv515_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic r520_asic = {
+ .init = &r520_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &r520_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .asic_reset = &rs600_asic_reset,
+ .ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .mc_wait_for_idle = &r520_mc_wait_for_idle,
+ .gart = {
+ .tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .set_page = &rv370_pcie_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &rs600_irq_set,
+ .process = &rs600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r100_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r200_copy_dma,
+ .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r100_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r100_set_surface_reg,
+ .clear_reg = r100_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &rs600_hpd_init,
+ .fini = &rs600_hpd_fini,
+ .sense = &rs600_hpd_sense,
+ .set_polarity = &rs600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rs600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r420_pm_init_profile,
+ .get_dynpm_state = &r100_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic r600_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .vga_set_state = &r600_vga_set_state,
+ .asic_reset = &r600_asic_reset,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic rs780_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .vga_set_state = &r600_vga_set_state,
+ .asic_reset = &r600_asic_reset,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
+ },
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rs690_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &r600_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &r600_copy_blit,
+ .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &r600_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &rs780_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic rv770_asic = {
+ .init = &rv770_init,
+ .fini = &rv770_fini,
+ .suspend = &rv770_suspend,
+ .resume = &rv770_resume,
+ .asic_reset = &r600_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &r600_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &r600_irq_set,
+ .process = &r600_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &rv515_bandwidth_update,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .wait_for_vblank = &avivo_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &rv770_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &rv770_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &r600_hpd_init,
+ .fini = &r600_hpd_fini,
+ .sense = &r600_hpd_sense,
+ .set_polarity = &r600_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &rv770_pm_misc,
+ .prepare = &rs600_pm_prepare,
+ .finish = &rs600_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_uvd_clocks = &rv770_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
+ },
+};
+
+static struct radeon_asic evergreen_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &r600_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+static struct radeon_asic sumo_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &sumo_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+static struct radeon_asic btc_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &btc_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+static struct radeon_asic cayman_asic = {
+ .init = &cayman_init,
+ .fini = &cayman_fini,
+ .suspend = &cayman_suspend,
+ .resume = &cayman_resume,
+ .asic_reset = &cayman_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .vm = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .set_page = &cayman_vm_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &btc_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+static struct radeon_asic trinity_asic = {
+ .init = &cayman_init,
+ .fini = &cayman_fini,
+ .suspend = &cayman_suspend,
+ .resume = &cayman_resume,
+ .asic_reset = &cayman_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .vm = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .set_page = &cayman_vm_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &evergreen_irq_set,
+ .process = &evergreen_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &dce6_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ },
+ .copy = {
+ .blit = &r600_copy_blit,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &evergreen_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &evergreen_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &sumo_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+static struct radeon_asic si_asic = {
+ .init = &si_init,
+ .fini = &si_fini,
+ .suspend = &si_suspend,
+ .resume = &si_resume,
+ .asic_reset = &si_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &si_get_xclk,
+ .get_gpu_clock_counter = &si_get_gpu_clock_counter,
+ .gart = {
+ .tlb_flush = &si_pcie_gart_tlb_flush,
+ .set_page = &rs600_gart_set_page,
+ },
+ .vm = {
+ .init = &si_vm_init,
+ .fini = &si_vm_fini,
+ .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .set_page = &si_vm_set_page,
+ },
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &si_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &si_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ }
+ },
+ .irq = {
+ .set = &si_irq_set,
+ .process = &si_irq_process,
+ },
+ .display = {
+ .bandwidth_update = &dce6_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
+ .set_backlight_level = &atombios_set_backlight_level,
+ .get_backlight_level = &atombios_get_backlight_level,
+ },
+ .copy = {
+ .blit = NULL,
+ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .dma = &si_copy_dma,
+ .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .copy = &si_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+ },
+ .surface = {
+ .set_reg = r600_set_surface_reg,
+ .clear_reg = r600_clear_surface_reg,
+ },
+ .hpd = {
+ .init = &evergreen_hpd_init,
+ .fini = &evergreen_hpd_fini,
+ .sense = &evergreen_hpd_sense,
+ .set_polarity = &evergreen_hpd_set_polarity,
+ },
+ .pm = {
+ .misc = &evergreen_pm_misc,
+ .prepare = &evergreen_pm_prepare,
+ .finish = &evergreen_pm_finish,
+ .init_profile = &sumo_pm_init_profile,
+ .get_dynpm_state = &r600_pm_get_dynpm_state,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_uvd_clocks = &si_set_uvd_clocks,
+ },
+ .pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+ },
+};
+
+/**
+ * radeon_asic_init - register asic specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate asic-specific callbacks for each
+ * chip family. Also sets other asic-specific info such as the number
+ * of crtcs and the register aperture accessors (all asics).
+ * Returns 0 for success.
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+ radeon_register_accessor_init(rdev);
+
+ /* set the number of crtcs */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ rdev->num_crtc = 1;
+ else
+ rdev->num_crtc = 2;
+
+ rdev->has_uvd = false;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ rdev->asic = &r100_asic;
+ break;
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ rdev->asic = &r200_asic;
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ if (rdev->flags & RADEON_IS_PCIE)
+ rdev->asic = &r300_asic_pcie;
+ else
+ rdev->asic = &r300_asic;
+ break;
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ rdev->asic = &r420_asic;
+ /* handle macs */
+ if (rdev->bios == NULL) {
+ rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
+ rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
+ rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
+ rdev->asic->pm.set_memory_clock = NULL;
+ rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
+ }
+ break;
+ case CHIP_RS400:
+ case CHIP_RS480:
+ rdev->asic = &rs400_asic;
+ break;
+ case CHIP_RS600:
+ rdev->asic = &rs600_asic;
+ break;
+ case CHIP_RS690:
+ case CHIP_RS740:
+ rdev->asic = &rs690_asic;
+ break;
+ case CHIP_RV515:
+ rdev->asic = &rv515_asic;
+ break;
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ rdev->asic = &r520_asic;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RV670:
+ rdev->asic = &r600_asic;
+ if (rdev->family == CHIP_R600)
+ rdev->has_uvd = false;
+ else
+ rdev->has_uvd = true;
+ break;
+ case CHIP_RS780:
+ case CHIP_RS880:
+ rdev->asic = &rs780_asic;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ rdev->asic = &rv770_asic;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CEDAR)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
+ rdev->asic = &evergreen_asic;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_PALM:
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ rdev->asic = &sumo_asic;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ /* set num crtcs */
+ if (rdev->family == CHIP_CAICOS)
+ rdev->num_crtc = 4;
+ else
+ rdev->num_crtc = 6;
+ rdev->asic = &btc_asic;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_CAYMAN:
+ rdev->asic = &cayman_asic;
+ /* set num crtcs */
+ rdev->num_crtc = 6;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_ARUBA:
+ rdev->asic = &trinity_asic;
+ /* set num crtcs */
+ rdev->num_crtc = 4;
+ rdev->has_uvd = true;
+ break;
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ rdev->asic = &si_asic;
+ /* set num crtcs */
+ if (rdev->family == CHIP_HAINAN)
+ rdev->num_crtc = 0;
+ else if (rdev->family == CHIP_OLAND)
+ rdev->num_crtc = 2;
+ else
+ rdev->num_crtc = 6;
+ if (rdev->family == CHIP_HAINAN)
+ rdev->has_uvd = false;
+ else
+ rdev->has_uvd = true;
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->pm.get_memory_clock = NULL;
+ rdev->asic->pm.set_memory_clock = NULL;
+ }
+
+ return 0;
+}
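+
+/*
+ * A minimal usage sketch (the exact macro bodies are assumed here; they
+ * live in radeon.h, not in this file): core code never dereferences
+ * rdev->asic directly, it goes through per-callback wrapper macros,
+ * roughly of the form
+ *
+ *   #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+ *   #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+ *
+ * so once radeon_asic_init() has selected a table above, generic paths
+ * such as GPU reset or GART flushes stay chip-family agnostic.
+ */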
+
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 0000000..34223fc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+
+/*
+ * r100,rv100,rs100,rv200,rs200
+ */
+struct r100_mc_save {
+ u32 GENMO_WT;
+ u32 CRTC_EXT_CNTL;
+ u32 CRTC_GEN_CNTL;
+ u32 CRTC2_GEN_CNTL;
+ u32 CUR_OFFSET;
+ u32 CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
+void r100_vga_set_state(struct radeon_device *rdev, bool state);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r100_asic_reset(struct radeon_device *rdev);
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+int r100_irq_set(struct radeon_device *rdev);
+int r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_bandwidth_update(struct radeon_device *rdev);
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ const unsigned *auth, unsigned n,
+ radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
+
+/*
+ * r300,r350,rv350,rv380
+ */
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/*
+ * r420,r423,rv410
+ */
+extern int r420_init(struct radeon_device *rdev);
+extern void r420_fini(struct radeon_device *rdev);
+extern int r420_suspend(struct radeon_device *rdev);
+extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
+
+/*
+ * rs400,rs480
+ */
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs600
+ */
+extern int rs600_asic_reset(struct radeon_device *rdev);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs690,rs740
+ */
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode1,
+ struct drm_display_mode *mode2);
+extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rv515
+ */
+struct rv515_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ bool crtc_enabled[2];
+};
+
+int rv515_init(struct radeon_device *rdev);
+void rv515_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void rv515_bandwidth_update(struct radeon_device *rdev);
+int rv515_resume(struct radeon_device *rdev);
+int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+int r520_init(struct radeon_device *rdev);
+int r520_resume(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
+ */
+int r600_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+int r600_suspend(struct radeon_device *rdev);
+int r600_resume(struct radeon_device *rdev);
+void r600_vga_set_state(struct radeon_device *rdev, bool state);
+int r600_wb_init(struct radeon_device *rdev);
+void r600_wb_fini(struct radeon_device *rdev);
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
+void r600_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_asic_reset(struct radeon_device *rdev);
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+ uint32_t tiling_flags, uint32_t pitch,
+ uint32_t offset, uint32_t obj_size);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
+void r600_audio_fini(struct radeon_device *rdev);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+
+/* uvd */
+int r600_uvd_init(struct radeon_device *rdev);
+int r600_uvd_rbc_start(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/*
+ * rv770,rv730,rv710,rv740
+ */
+int rv770_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+int rv770_suspend(struct radeon_device *rdev);
+int rv770_resume(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_uvd_resume(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+/*
+ * evergreen
+ */
+struct evergreen_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ bool crtc_enabled[RADEON_MAX_CRTCS];
+};
+
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int evergreen_asic_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
+/*
+ * cayman
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+/* DCE6 - SI */
+void dce6_bandwidth_update(struct radeon_device *rdev);
+
+/*
+ * si
+ */
+void si_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int si_init(struct radeon_device *rdev);
+void si_fini(struct radeon_device *rdev);
+int si_suspend(struct radeon_device *rdev);
+int si_resume(struct radeon_device *rdev);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int si_asic_reset(struct radeon_device *rdev);
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_irq_set(struct radeon_device *rdev);
+int si_irq_process(struct radeon_device *rdev);
+int si_vm_init(struct radeon_device *rdev);
+void si_vm_fini(struct radeon_device *rdev);
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 0000000..239a407
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,3315 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+ uint32_t supported_device, u16 caps);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+ uint32_t supported_device);
+
+/* local */
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+
+union atom_supported_devices {
+ struct _ATOM_SUPPORTED_DEVICES_INFO info;
+ struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+ struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+ ATOM_GPIO_I2C_ASSIGMENT *gpio,
+ u8 index)
+{
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((index == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((index == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+}
+
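+/* Translate one ATOM GPIO_I2C_ASSIGMENT table entry into the driver's
+ * radeon_i2c_bus_rec.  The BIOS stores register indices in DWORD units,
+ * hence the multiply by 4; the record is only marked valid when a clock
+ * mask register is present.
+ */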
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+ struct radeon_i2c_bus_rec i2c;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
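+/* Walk the GPIO_I2C_Info table and return the bus record whose i2c id
+ * matches @id; the returned record has .valid == false when no entry
+ * matches.
+ */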
+static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+ uint8_t id)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct radeon_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+ i2c.valid = false;
+
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+ break;
+ }
+ }
+ }
+
+ return i2c;
+}
+
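+/* Register an i2c bus for every valid entry in the GPIO_I2C_Info table,
+ * applying the per-board quirks first.
+ */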
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct radeon_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+
+ if (i2c.valid) {
+ sprintf(stmp, "0x%x", i2c.i2c_id);
+ rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+ }
+ }
+ }
+}
+
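+/* Look up a GPIO pin by id in the GPIO_Pin_LUT table. */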
+static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ struct radeon_gpio_rec gpio;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+ struct _ATOM_GPIO_PIN_LUT *gpio_info;
+ ATOM_GPIO_PIN_ASSIGNMENT *pin;
+ u16 data_offset, size;
+ int i, num_indices;
+
+ memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+ gpio.valid = false;
+
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
+ }
+ }
+
+ return gpio;
+}
+
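+/* Map a hotplug GPIO onto one of the six fixed HPD pins by comparing it
+ * against the DC_GPIO_HPD_A register block of the ASIC's display engine.
+ */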
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+ struct radeon_gpio_rec *gpio)
+{
+ struct radeon_hpd hpd;
+ u32 reg;
+
+ memset(&hpd, 0, sizeof(struct radeon_hpd));
+
+ if (ASIC_IS_DCE6(rdev))
+ reg = SI_DC_GPIO_HPD_A;
+ else if (ASIC_IS_DCE4(rdev))
+ reg = EVERGREEN_DC_GPIO_HPD_A;
+ else
+ reg = AVIVO_DC_GPIO_HPD_A;
+
+ hpd.gpio = *gpio;
+ if (gpio->reg == reg) {
+ switch (gpio->mask) {
+ case (1 << 0):
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ case (1 << 8):
+ hpd.hpd = RADEON_HPD_2;
+ break;
+ case (1 << 16):
+ hpd.hpd = RADEON_HPD_3;
+ break;
+ case (1 << 24):
+ hpd.hpd = RADEON_HPD_4;
+ break;
+ case (1 << 26):
+ hpd.hpd = RADEON_HPD_5;
+ break;
+ case (1 << 28):
+ hpd.hpd = RADEON_HPD_6;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else
+ hpd.hpd = RADEON_HPD_NONE;
+ return hpd;
+}
+
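+/* Fix up connector info for boards whose BIOS tables are known to be
+ * wrong.  Returning false drops the connector entirely.
+ */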
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ uint32_t supported_device,
+ int *connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
+{
+ /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
+ /* Asrock RS600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x1849) &&
+ (dev->pdev->subsystem_device == 0x7941)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
+ /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1462) &&
+ (dev->pdev->subsystem_device == 0x7302)) {
+ if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ return false;
+ }
+
+ /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x147b) &&
+ (dev->pdev->subsystem_device == 0x2412)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+ return false;
+ }
+
+ /* Falcon NW laptop lists vga ddc line for LVDS */
+ if ((dev->pdev->device == 0x5653) &&
+ (dev->pdev->subsystem_vendor == 0x1462) &&
+ (dev->pdev->subsystem_device == 0x0291)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ i2c_bus->valid = false;
+ *line_mux = 53;
+ }
+ }
+
+ /* HIS X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7146) &&
+ (dev->pdev->subsystem_vendor == 0x17af) &&
+ (dev->pdev->subsystem_device == 0x2058)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7142) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x2134)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Funky macbooks */
+ if ((dev->pdev->device == 0x71C5) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x0080)) {
+ if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return false;
+ if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+ *line_mux = 0x90;
+ }
+
+ /* mac rv630, rv730, others */
+ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+ }
+
+ /* ASUS HD 3600 XT board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x9598) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01da)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
+ /* ASUS HD 3600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x9598) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01e4)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
+ /* ASUS HD 3450 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x95C5) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x01e2)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
+ /* some BIOSes seem to report DAC on HDMI - usually this is a board with
+ * HDMI + VGA reporting as HDMI
+ */
+ if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+ *connector_type = DRM_MODE_CONNECTOR_VGA;
+ *line_mux = 0;
+ }
+ }
+
+ /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
+ * on the laptop and a DVI port on the docking station and
+ * both share the same encoder, hpd pin, and ddc line.
+ * So while the bios table is technically correct,
+ * we drop the DVI port here since xrandr has no concept of
+ * encoders and will try to drive both connectors
+ * with different crtcs, which isn't possible on the hardware
+ * side and leaves no crtcs for LVDS or VGA.
+ */
+ if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+ /* actually it's a DVI-D port not DVI-I */
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ return false;
+ }
+ }
+
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
+ * even though they are wired up to record 0x93
+ */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+ (dev->pdev->subsystem_device == 0x2452) &&
+ (i2c_bus->valid == false) &&
+ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ *line_mux = 0x3103;
+ } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+ *connector_type = DRM_MODE_CONNECTOR_DVII;
+ }
+ }
+
+ return true;
+}
+
+const int supported_devices_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_DVIA,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_LVDS,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_HDMIA,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_DisplayPort
+};
+
+const uint16_t supported_devices_connector_object_id_convert[] = {
+ CONNECTOR_OBJECT_ID_NONE,
+ CONNECTOR_OBJECT_ID_VGA,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+ CONNECTOR_OBJECT_ID_COMPOSITE,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ CONNECTOR_OBJECT_ID_LVDS,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_DISPLAYPORT,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+ CONNECTOR_OBJECT_ID_SVIDEO
+};
+
+const int object_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_HDMIA,
+ DRM_MODE_CONNECTOR_HDMIB,
+ DRM_MODE_CONNECTOR_LVDS,
+ DRM_MODE_CONNECTOR_9PinDIN,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ DRM_MODE_CONNECTOR_eDP,
+ DRM_MODE_CONNECTOR_Unknown
+};
+
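+/* Parse the Object_Header tables (display paths plus connector, encoder
+ * and router objects) and register the encoders and connectors they
+ * describe.  Only object table revision 2 and newer is handled here.
+ */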
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 size, data_offset;
+ u8 frev, crev;
+ ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_ENCODER_OBJECT_TABLE *enc_obj;
+ ATOM_OBJECT_TABLE *router_obj;
+ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ ATOM_OBJECT_HEADER *obj_header;
+ int i, j, k, path_size, device_support;
+ int connector_type;
+ u16 igp_lane_info, conn_id, connector_object_id;
+ struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_router router;
+ struct radeon_gpio_rec gpio;
+ struct radeon_hpd hpd;
+
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
+
+ if (crev < 2)
+ return false;
+
+ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usDisplayPathTableOffset));
+ con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usEncoderObjectTableOffset));
+ router_obj = (ATOM_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usRouterObjectTableOffset));
+ device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
+ path_size = 0;
+ for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+ uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+ ATOM_DISPLAY_OBJECT_PATH *path;
+ addr += path_size;
+ path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+ path_size += le16_to_cpu(path->usSize);
+
+ if (device_support & le16_to_cpu(path->usDeviceTag)) {
+ uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+ con_obj_id =
+ (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+ >> OBJECT_ID_SHIFT;
+ con_obj_num =
+ (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+ >> ENUM_ID_SHIFT;
+ con_obj_type =
+ (le16_to_cpu(path->usConnObjectId) &
+ OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+ /* TODO CV support */
+ if (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT)
+ continue;
+
+ /* IGP chips */
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (con_obj_id ==
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+ uint16_t igp_offset = 0;
+ ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+ index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+
+ if (atom_parse_data_header(ctx, index, &size, &frev,
+ &crev, &igp_offset)) {
+ if (crev >= 2) {
+ igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *)
+ (ctx->bios + igp_offset);
+
+ if (igp_obj) {
+ uint32_t slot_config, ct;
+
+ if (con_obj_num == 1)
+ slot_config = igp_obj->ulDDISlot1Config;
+ else
+ slot_config = igp_obj->ulDDISlot2Config;
+
+ ct = (slot_config >> 16) & 0xff;
+ connector_type = object_connector_convert[ct];
+ connector_object_id = ct;
+ igp_lane_info = slot_config & 0xffff;
+ } else
+ continue;
+ } else
+ continue;
+ } else {
+ igp_lane_info = 0;
+ connector_type = object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
+ }
+ } else {
+ igp_lane_info = 0;
+ connector_type =
+ object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+ router.ddc_valid = false;
+ router.cd_valid = false;
+ for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+ uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+
+ grph_obj_id =
+ (le16_to_cpu(path->usGraphicObjIds[j]) &
+ OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ grph_obj_num =
+ (le16_to_cpu(path->usGraphicObjIds[j]) &
+ ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ grph_obj_type =
+ (le16_to_cpu(path->usGraphicObjIds[j]) &
+ OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+ if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+ for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+ u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+ ATOM_ENCODER_CAP_RECORD *cap_record;
+ u16 caps = 0;
+
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_ENCODER_CAP_RECORD_TYPE:
+ cap_record = (ATOM_ENCODER_CAP_RECORD *)record;
+ caps = le16_to_cpu(cap_record->usEncoderCap);
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ radeon_add_atom_encoder(dev, encoder_obj,
+ le16_to_cpu(path->usDeviceTag),
+ caps);
+ }
+ }
+ } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+ for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+ ATOM_I2C_RECORD *i2c_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+ ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
+ ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ u8 *num_dst_objs = (u8 *)
+ ((u8 *)router_src_dst_table + 1 +
+ (router_src_dst_table->ucNumberOfSrc * 2));
+ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+ int enum_id;
+
+ router.router_id = router_obj_id;
+ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+ le16_to_cpu(dst_objs[enum_id]))
+ break;
+ }
+
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_I2C_RECORD_TYPE:
+ i2c_record = (ATOM_I2C_RECORD *)record;
+ i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ router.i2c_info =
+ radeon_lookup_i2c_gpio(rdev,
+ i2c_config->ucAccess);
+ router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+ break;
+ case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+ ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+ record;
+ router.ddc_valid = true;
+ router.ddc_mux_type = ddc_path->ucMuxType;
+ router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+ router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+ break;
+ case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+ cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+ record;
+ router.cd_valid = true;
+ router.cd_mux_type = cd_path->ucMuxType;
+ router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+ router.cd_mux_state = cd_path->ucMuxState[enum_id];
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ }
+ }
+ }
+ }
+
+ /* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ if ((le16_to_cpu(path->usDeviceTag) &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+ for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+ le16_to_cpu(con_obj->asObjects[j].
+ usObjectID)) {
+ ATOM_COMMON_RECORD_HEADER *record =
+ (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
+ ATOM_I2C_RECORD *i2c_record;
+ ATOM_HPD_INT_RECORD *hpd_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+
+ while (record->ucRecordSize > 0 &&
+ record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_I2C_RECORD_TYPE:
+ i2c_record = (ATOM_I2C_RECORD *)record;
+ i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ ddc_bus = radeon_lookup_i2c_gpio(rdev,
+ i2c_config->ucAccess);
+ break;
+ case ATOM_HPD_INT_RECORD_TYPE:
+ hpd_record = (ATOM_HPD_INT_RECORD *)record;
+ gpio = radeon_lookup_gpio(rdev,
+ hpd_record->ucHPDIntGPIOID);
+ hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ hpd.plugged_state = hpd_record->ucPlugged_PinState;
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ break;
+ }
+ }
+ }
+
+ /* needed for aux chan transactions */
+ ddc_bus.hpd = hpd.hpd;
+
+ conn_id = le16_to_cpu(path->usConnObjectId);
+
+ if (!radeon_atom_apply_quirks
+ (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
+ &ddc_bus, &conn_id, &hpd))
+ continue;
+
+ radeon_add_atom_connector(dev,
+ conn_id,
+ le16_to_cpu(path->usDeviceTag),
+ connector_type, &ddc_bus,
+ igp_lane_info,
+ connector_object_id,
+ &hpd,
+ &router);
+
+ }
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
+
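+/* Derive a connector object id for BIOSes that only provide the
+ * supported-devices table; for DFP2-capable DVI ports the XTMDS_Info
+ * table is consulted to distinguish single- from dual-link DVI.
+ */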
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+ int connector_type,
+ uint16_t devices)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+ (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+ (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+ uint16_t size, data_offset;
+ uint8_t frev, crev;
+ ATOM_XTMDS_INFO *xtmds;
+
+ if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+ xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+
+ if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ } else {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ }
+ } else
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ } else {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ }
+}
+
+struct bios_connector {
+ bool valid;
+ uint16_t line_mux;
+ uint16_t devices;
+ int connector_type;
+ struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_hpd hpd;
+};
+
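+/* Fallback connector parsing for BIOSes without a usable object table:
+ * build one bios_connector per supported device, merge entries sharing
+ * a DDC line into combined (e.g. DVI-I) connectors, then register the
+ * result.
+ */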
+bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+ uint16_t size, data_offset;
+ uint8_t frev, crev;
+ uint16_t device_support;
+ uint8_t dac;
+ union atom_supported_devices *supported_devices;
+ int i, j, max_device;
+ struct bios_connector *bios_connectors;
+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+ struct radeon_router router;
+
+ router.ddc_valid = false;
+ router.cd_valid = false;
+
+ bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+ if (!bios_connectors)
+ return false;
+
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset)) {
+ kfree(bios_connectors);
+ return false;
+ }
+
+ supported_devices =
+ (union atom_supported_devices *)(ctx->bios + data_offset);
+
+ device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+ if (frev > 1)
+ max_device = ATOM_MAX_SUPPORTED_DEVICE;
+ else
+ max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+ for (i = 0; i < max_device; i++) {
+ ATOM_CONNECTOR_INFO_I2C ci =
+ supported_devices->info.asConnInfo[i];
+
+ bios_connectors[i].valid = false;
+
+ if (!(device_support & (1 << i)))
+ continue;
+
+ if (i == ATOM_DEVICE_CV_INDEX) {
+ DRM_DEBUG_KMS("Skipping Component Video\n");
+ continue;
+ }
+
+ bios_connectors[i].connector_type =
+ supported_devices_connector_convert[ci.sucConnectorInfo.
+ sbfAccess.
+ bfConnectorType];
+
+ if (bios_connectors[i].connector_type ==
+ DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+ dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.ucAccess;
+
+ /* give tv unique connector ids */
+ if (i == ATOM_DEVICE_TV1_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 50;
+ } else if (i == ATOM_DEVICE_TV2_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 51;
+ } else if (i == ATOM_DEVICE_CV_INDEX) {
+ bios_connectors[i].ddc_bus.valid = false;
+ bios_connectors[i].line_mux = 52;
+ } else
+ bios_connectors[i].ddc_bus =
+ radeon_lookup_i2c_gpio(rdev,
+ bios_connectors[i].line_mux);
+
+ if ((crev > 1) && (frev > 1)) {
+ u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+ switch (isb) {
+ case 0x4:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ break;
+ case 0xa:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ break;
+ default:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else {
+ if (i == ATOM_DEVICE_DFP1_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ else if (i == ATOM_DEVICE_DFP2_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ else
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ }
+
+ /* Always set the connector type to VGA for CRT1/CRT2; if they are
+ * shared with a DVI port, we'll pick up the DVI connector when we
+ * merge the outputs. Some BIOSes incorrectly list VGA ports as DVI.
+ */
+ if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_VGA;
+
+ if (!radeon_atom_apply_quirks
+ (dev, (1 << i), &bios_connectors[i].connector_type,
+ &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+ &bios_connectors[i].hpd))
+ continue;
+
+ bios_connectors[i].valid = true;
+ bios_connectors[i].devices = (1 << i);
+
+ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+ radeon_add_atom_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ (1 << i),
+ dac),
+ (1 << i),
+ 0);
+ else
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ (1 << i),
+ dac),
+ (1 << i));
+ }
+
+ /* combine shared connectors */
+ for (i = 0; i < max_device; i++) {
+ if (bios_connectors[i].valid) {
+ for (j = 0; j < max_device; j++) {
+ if (bios_connectors[j].valid && (i != j)) {
+ if (bios_connectors[i].line_mux ==
+ bios_connectors[j].line_mux) {
+ /* make sure not to combine LVDS */
+ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[i].line_mux = 53;
+ bios_connectors[i].ddc_bus.valid = false;
+ continue;
+ }
+ if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[j].line_mux = 53;
+ bios_connectors[j].ddc_bus.valid = false;
+ continue;
+ }
+ /* combine analog and digital for DVI-I */
+ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+ ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+ bios_connectors[i].devices |=
+ bios_connectors[j].devices;
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
+ bios_connectors[i].hpd =
+ bios_connectors[j].hpd;
+ bios_connectors[j].valid = false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* add the connectors */
+ for (i = 0; i < max_device; i++) {
+ if (bios_connectors[i].valid) {
+ uint16_t connector_object_id =
+ atombios_get_connector_object_id(dev,
+ bios_connectors[i].connector_type,
+ bios_connectors[i].devices);
+ radeon_add_atom_connector(dev,
+ bios_connectors[i].line_mux,
+ bios_connectors[i].devices,
+ bios_connectors[i].connector_type,
+ &bios_connectors[i].ddc_bus,
+ 0,
+ connector_object_id,
+ &bios_connectors[i].hpd,
+ &router);
+ }
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ kfree(bios_connectors);
+ return true;
+}
+
+union firmware_info {
+ ATOM_FIRMWARE_INFO info;
+ ATOM_FIRMWARE_INFO_V1_2 info_12;
+ ATOM_FIRMWARE_INFO_V1_3 info_13;
+ ATOM_FIRMWARE_INFO_V1_4 info_14;
+ ATOM_FIRMWARE_INFO_V2_1 info_21;
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
+};
+
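+/* Read the reference clocks, PLL limits and default engine/memory
+ * clocks from the FirmwareInfo table, substituting sane defaults for
+ * fields some BIOSes leave at zero.
+ */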
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ union firmware_info *firmware_info;
+ uint8_t frev, crev;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ uint16_t data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ /* pixel clocks */
+ p1pll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ p1pll->reference_div = 0;
+
+ if (crev < 2)
+ p1pll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ else
+ p1pll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ p1pll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+ if (crev >= 4) {
+ p1pll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_min == 0)
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_max == 0)
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ } else {
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ }
+
+ if (p1pll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ p1pll->pll_out_min = 64800;
+ else
+ p1pll->pll_out_min = 20000;
+ }
+
+ p1pll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+ p1pll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+ *p2pll = *p1pll;
+
+ /* system clock */
+ if (ASIC_IS_DCE4(rdev))
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+ else
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ spll->reference_div = 0;
+
+ spll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+ spll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+ /* some BIOSes report a zero minimum; fall back to a sane default */
+ if (spll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ spll->pll_out_min = 64800;
+ else
+ spll->pll_out_min = 20000;
+ }
+
+ spll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+ spll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+ /* memory clock */
+ if (ASIC_IS_DCE4(rdev))
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+ else
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
+ mpll->reference_div = 0;
+
+ mpll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+ mpll->pll_out_max =
+ le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+ /* some BIOSes report a zero minimum; fall back to a sane default */
+ if (mpll->pll_out_min == 0) {
+ if (ASIC_IS_AVIVO(rdev))
+ mpll->pll_out_min = 64800;
+ else
+ mpll->pll_out_min = 20000;
+ }
+
+ mpll->pll_in_min =
+ le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+ mpll->pll_in_max =
+ le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+ rdev->clock.default_sclk =
+ le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+ rdev->clock.default_mclk =
+ le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+ if (ASIC_IS_DCE4(rdev)) {
+ rdev->clock.default_dispclk =
+ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+ if (rdev->clock.default_dispclk == 0) {
+ if (ASIC_IS_DCE5(rdev))
+ rdev->clock.default_dispclk = 54000; /* 540 MHz */
+ else
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ }
+ rdev->clock.dp_extclk =
+ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+ }
+ *dcpll = *p1pll;
+
+ rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+ if (rdev->clock.max_pixel_clock == 0)
+ rdev->clock.max_pixel_clock = 40000;
+
+ /* not technically a clock, but... */
+ rdev->mode_info.firmware_flags =
+ le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
+ return true;
+ }
+
+ return false;
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+};
+
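+/* Detect IGP sideport memory by checking IntegratedSystemInfo for a
+ * non-zero boot-up (side port) memory clock.
+ */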
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ /* sideport is AMD only */
+ if (rdev->family == CHIP_RS600)
+ return false;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+ switch (crev) {
+ case 1:
+ if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
+ return true;
+ break;
+ case 2:
+ if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
+ return true;
+ break;
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ break;
+ }
+ }
+ return false;
+}
+
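+/* Pull the internal TMDS PLL settings (charge pump, VCO gain, duty
+ * cycle, voltage swing) per frequency step from the TMDS_Info table;
+ * the entry matching the table's maximum frequency is flagged with
+ * 0xffffffff.
+ */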
+bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+ uint16_t data_offset;
+ struct _ATOM_TMDS_INFO *tmds_info;
+ uint8_t frev, crev;
+ uint16_t maxfreq;
+ int i;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ tmds_info =
+ (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+ data_offset);
+
+ maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+ for (i = 0; i < 4; i++) {
+ tmds->tmds_pll[i].freq =
+ le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+ tmds->tmds_pll[i].value =
+ tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_VCO_Gain & 0x3f) << 6;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_DutyCycle & 0xf) << 12;
+ tmds->tmds_pll[i].value |=
+ (tmds_info->asMiscInfo[i].
+ ucPLL_VoltageSwing & 0xf) << 16;
+
+ DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+
+ if (maxfreq == tmds->tmds_pll[i].freq) {
+ tmds->tmds_pll[i].freq = 0xffffffff;
+ break;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
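+/* Look up pixel PLL spread spectrum parameters by ss id in the
+ * PPLL_SS_Info table.
+ */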
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+ uint16_t data_offset, size;
+ struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ uint8_t frev, crev;
+ int i, num_indices;
+
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ ss_info =
+ (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ ss->percentage =
+ le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[i].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[i].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
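+/* On IGP parts the IntegratedSystemInfo table may override the spread
+ * spectrum percentage and rate per output type (TMDS, HDMI, LVDS).
+ */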
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 percentage = 0, rate = 0;
+
+ /* get any igp specific overrides */
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 6:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
+ break;
+ case 7:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ break;
+ }
+ if (percentage)
+ ss->percentage = percentage;
+ if (rate)
+ ss->rate = rate;
+ }
+}
+
+union asic_ss_info {
+ struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+ struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
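+/* Look up internal spread spectrum settings in ASIC_InternalSS_Info,
+ * matching both the clock id and the target clock range; all three
+ * table revisions are handled.
+ */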
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ uint16_t data_offset, size;
+ union asic_ss_info *ss_info;
+ uint8_t frev, crev;
+ int i, num_indices;
+
+ memset(ss, 0, sizeof(struct radeon_atom_ss));
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+
+ ss_info =
+ (union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+ return true;
+ }
+ }
+ break;
+ case 2:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ return true;
+ }
+ }
+ break;
+ case 3:
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+ for (i = 0; i < num_indices; i++) {
+ if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+ ss->percentage =
+ le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ if (rdev->flags & RADEON_IS_IGP)
+ radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
+ return true;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+ break;
+ }
+
+ }
+ return false;
+}
+
+union lvds_info {
+ struct _ATOM_LVDS_INFO info;
+ struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
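+/* Build the native panel mode from the LVDS_Info table. Blanking, sync
+ * offset and sync width are stored relative to the active region, so the
+ * total and sync values are reconstructed by addition below.
+ */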
+struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+ uint16_t data_offset, misc;
+ union lvds_info *lvds_info;
+ uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *lvds = NULL;
+ int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ lvds_info =
+ (union lvds_info *)(mode_info->atom_context->bios + data_offset);
+ lvds =
+ kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ lvds->native_mode.clock =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+ lvds->native_mode.hdisplay =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+ lvds->native_mode.vdisplay =
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ lvds->panel_pwr_delay =
+ le16_to_cpu(lvds_info->info.usOffDelayInMs);
+ lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
+
+ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+ lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+ lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
+
+ encoder->native_mode = lvds->native_mode;
+
+ lvds->linkb = (encoder_enum == 2);
+
+ /* parse the lcd record table */
+ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+ ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+ ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+ bool bad_record = false;
+ u8 *record;
+
+ if ((frev == 1) && (crev < 2))
+ /* absolute */
+ record = (u8 *)(mode_info->atom_context->bios +
+ le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+ else
+ /* relative */
+ record = (u8 *)(mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+ while (*record != ATOM_RECORD_END_TYPE) {
+ switch (*record) {
+ case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+ record += sizeof(ATOM_PATCH_RECORD_MODE);
+ break;
+ case LCD_RTS_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_RTS_RECORD);
+ break;
+ case LCD_CAP_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+ break;
+ case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+ int edid_size =
+ max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+ edid = kmalloc(edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+ fake_edid_record->ucFakeEDIDLength);
+
+ if (drm_edid_is_valid(edid)) {
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+ } else
+ kfree(edid);
+ }
+ }
+ record += fake_edid_record->ucFakeEDIDLength ?
+ fake_edid_record->ucFakeEDIDLength + 2 :
+ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+ lvds->native_mode.width_mm = le16_to_cpu(panel_res_record->usHSize);
+ lvds->native_mode.height_mm = le16_to_cpu(panel_res_record->usVSize);
+ record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+ break;
+ default:
+ DRM_ERROR("Bad LCD record %d\n", *record);
+ bad_record = true;
+ break;
+ }
+ if (bad_record)
+ break;
+ }
+ }
+ }
+ return lvds;
+}
+
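+/* The CompassionateData table stores legacy DAC bandgap/adjustment values;
+ * pack them in what is presumably the same layout the combios path uses.
+ */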
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+ uint16_t data_offset;
+ struct _COMPASSIONATE_DATA *dac_info;
+ uint8_t frev, crev;
+ uint8_t bg, dac;
+ struct radeon_encoder_primary_dac *p_dac = NULL;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
+
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
+
+ if (!p_dac)
+ return NULL;
+
+ bg = dac_info->ucDAC1_BG_Adjustment;
+ dac = dac_info->ucDAC1_DAC_Adjustment;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+ }
+ return p_dac;
+}
+
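+/* Fetch analog TV timings: v1.1 tables store explicit CRTC timing values,
+ * while v1.2 tables use the generic ATOM_DTD_FORMAT detailed timings.
+ */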
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ ATOM_ANALOG_TV_INFO *tv_info;
+ ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2;
+ ATOM_DTD_FORMAT *dtd_timings;
+ int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+ u8 frev, crev;
+ u16 data_offset, misc;
+
+ if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+ &frev, &crev, &data_offset))
+ return false;
+
+ switch (crev) {
+ case 1:
+ tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+ if (index >= MAX_SUPPORTED_TV_TIMING)
+ return false;
+
+ mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+ mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+ mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+ mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+ mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+ mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+ mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+
+ if (index == 1) {
+ /* PAL timings appear to have wrong values for totals */
+ mode->crtc_htotal -= 1;
+ mode->crtc_vtotal -= 1;
+ }
+ break;
+ case 2:
+ tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
+ if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
+ return false;
+
+ dtd_timings = &tv_info_v1_2->aModeTimings[index];
+ mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHBlanking_Time);
+ mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+ mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHSyncOffset);
+ mode->crtc_hsync_end = mode->crtc_hsync_start +
+ le16_to_cpu(dtd_timings->usHSyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVBlanking_Time);
+ mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+ mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVSyncOffset);
+ mode->crtc_vsync_end = mode->crtc_vsync_start +
+ le16_to_cpu(dtd_timings->usVSyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+ break;
+ }
+ return true;
+}
+
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+ uint16_t data_offset;
+ uint8_t frev, crev;
+ struct _ATOM_ANALOG_TV_INFO *tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+ (mode_info->atom_context->bios + data_offset);
+
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_DEBUG_KMS("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_DEBUG_KMS("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+ }
+ return tv_std;
+}
+
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+ uint16_t data_offset;
+ struct _COMPASSIONATE_DATA *dac_info;
+ uint8_t frev, crev;
+ uint8_t bg, dac;
+ struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
+
+ tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+ if (!tv_dac)
+ return NULL;
+
+ bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+ dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+ dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+ dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+ tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
+ }
+ return tv_dac;
+}
+
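+/* These name tables are indexed by the ATOM thermal controller type id;
+ * the strings double as i2c board info names for the hwmon drivers.
+ */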
+static const char *thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "asc7xxx",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "RV6xx",
+ "RV770",
+ "adt7473",
+ "NONE",
+ "External GPIO",
+ "Evergreen",
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
+ "Southern Islands",
+ "lm96163",
+};
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
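+/* Translate the v1-3 powerplay misc flags into a power state type; later
+ * checks override earlier ones, hence the "order matters" note below.
+ */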
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+ int state_index,
+ u32 misc, u32 misc2)
+{
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+}
+
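+/* Parse the frev 1-3 PowerPlayInfo table: a flat array of power modes with
+ * one clock mode each, ordered low to high with the default usually last.
+ */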
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ u32 misc, misc2 = 0;
+ int num_modes = 0, i;
+ int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* add the i2c bus for thermal/fan chip */
+ if ((power_info->info.ucOverdriveThermalController > 0) &&
+ (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name =
+ thermal_controller_names[power_info->info.ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ /* last mode is usually default, array is low to high */
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
+ }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ }
+ }
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index - 1].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index - 1].misc = 0;
+ rdev->pm.power_state[state_index - 1].misc2 = 0;
+ }
+ return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+ ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
+ } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ } else {
+ DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ controller->ucType,
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ }
+ }
+}
+
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2))
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+ }
+}
+
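+/* Fill in the state type, PCIe lanes and flags from the pplib non-clock
+ * info, and patch the boot state with the firmware default clocks/voltage.
+ */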
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+ int j;
+ u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+ u16 vddc, vddci;
+
+ radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
+
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
+ /* NI chips post without MC ucode, so default clocks are strobe mode only */
+ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+ rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+ rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
+ } else {
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
+ }
+ }
+ }
+}
+
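+/* Decode one pplib clock mode. Clocks are split across a 16-bit low word
+ * and an 8-bit high byte; which union member applies depends on the chip
+ * family (Sumo/RS780 IGPs, SI, evergreen, r6xx/r7xx).
+ */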
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ union pplib_clock_info *clock_info)
+{
+ u32 sclk, mclk;
+ u16 vddc;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family >= CHIP_PALM) {
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ } else {
+ sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+ sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ }
+ } else if (rdev->family >= CHIP_TAHITI) {
+ sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+ sclk |= clock_info->si.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+ mclk |= clock_info->si.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->si.usVDDC);
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+ le16_to_cpu(clock_info->si.usVDDCI);
+ } else if (rdev->family >= CHIP_CEDAR) {
+ sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+ mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->evergreen.usVDDC);
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+ le16_to_cpu(clock_info->evergreen.usVDDCI);
+ } else {
+ sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+ sclk |= clock_info->r600.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+ mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ le16_to_cpu(clock_info->r600.usVDDC);
+ }
+
+ /* patch up vddc if necessary */
+ switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
+ case ATOM_VIRTUAL_VOLTAGE_ID0:
+ case ATOM_VIRTUAL_VOLTAGE_ID1:
+ case ATOM_VIRTUAL_VOLTAGE_ID2:
+ case ATOM_VIRTUAL_VOLTAGE_ID3:
+ if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
+ &vddc) == 0)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+ break;
+ default:
+ break;
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ return false;
+ } else {
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ return false;
+ }
+ return true;
+}
+
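+/* Parse the frev 4-5 pplib table: states index into separate clock info
+ * and non-clock info arrays, and each state may carry several clock modes.
+ */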
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ power_info->pplib.ucNumStates, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ /* first mode is usually default, followed by low to high */
+ for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+ i * power_info->pplib.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+ (power_state->v1.ucNonClockStateIndex *
+ power_info->pplib.ucNonClockSize));
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ ((power_info->pplib.ucStateEntrySize - 1) ?
+ (power_info->pplib.ucStateEntrySize - 1) : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_info->pplib.ucStateEntrySize - 1) {
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
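+/* Parse the frev 6 pplib table, which moves the state, clock info and
+ * non-clock info arrays behind explicit offsets with per-entry sizes.
+ */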
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, non_clock_array_index, clock_array_index;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ (power_state->v2.ucNumDPMLevels ?
+ power_state->v2.ucNumDPMLevels : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
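+ /* _ATOM_PPLIB_STATE_V2 is a 2-byte header (num levels, non-clock
+ * index) followed by one clock info index byte per DPM level */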
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
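+/* Top-level power table parser: dispatch on the table format revision and
+ * fall back to a single default state built from the firmware clocks if
+ * nothing usable is found.
+ */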
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int state_index = 0;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ switch (frev) {
+ case 1:
+ case 2:
+ case 3:
+ state_index = radeon_atombios_parse_power_table_1_3(rdev);
+ break;
+ case 4:
+ case 5:
+ state_index = radeon_atombios_parse_power_table_4_5(rdev);
+ break;
+ case 6:
+ state_index = radeon_atombios_parse_power_table_6(rdev);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (state_index == 0) {
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (rdev->pm.power_state[0].clock_info) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
+ }
+ }
+ }
+
+ rdev->pm.num_power_states = state_index;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ if (rdev->pm.default_power_state_index >= 0)
+ rdev->pm.current_vddc =
+ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ else
+ rdev->pm.current_vddc = 0;
+}
+
+union get_clock_dividers {
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
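+/* Ask the ComputeMemoryEnginePLL command table for PLL dividers. The
+ * parameter layout differs per table revision, hence the union above.
+ */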
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers)
+{
+ union get_clock_dividers args;
+ int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+ u8 frev, crev;
+
+ memset(&args, 0, sizeof(args));
+ memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ /* r4xx, r5xx */
+ args.v1.ucAction = clock_type;
+ args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v1.ucPostDiv;
+ dividers->fb_div = args.v1.ucFbDiv;
+ dividers->enable_post_div = true;
+ break;
+ case 2:
+ case 3:
+ /* r6xx, r7xx, evergreen, ni */
+ if (rdev->family <= CHIP_RV770) {
+ args.v2.ucAction = clock_type;
+ args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v2.ucPostDiv;
+ dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
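+ /* the table appears to return the reference divider in the action field */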
+ dividers->ref_div = args.v2.ucAction;
+ if (rdev->family == CHIP_RV770) {
+ dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+ true : false;
+ dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+ } else
+ dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+ } else {
+ if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+ args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v3.ucPostDiv;
+ dividers->enable_post_div = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v3.ucRefDiv;
+ dividers->vco_mode = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ } else {
+ args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+ if (strobe_mode)
+ args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v5.ucPostDiv;
+ dividers->enable_post_div = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v5.ucRefDiv;
+ dividers->vco_mode = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ }
+ }
+ break;
+ case 4:
+ /* fusion */
+ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v4.ucPostDiv;
+ dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
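+/* Thin wrappers around the clock command tables; clock values are in
+ * 10 kHz units throughout, matching the ATOM convention noted above.
+ */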
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+ args.ucEnable = enable;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+ GET_ENGINE_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return le32_to_cpu(args.ulReturnEngineClock);
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+ GET_MEMORY_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return le32_to_cpu(args.ulReturnMemoryClock);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+ uint32_t eng_clock)
+{
+ SET_ENGINE_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+ args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+ uint32_t mem_clock)
+{
+ SET_MEMORY_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union set_voltage {
+ struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+ struct _SET_VOLTAGE_PARAMETERS v1;
+ struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+ struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev, volt_index = voltage_level;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage_level == 0xff01)
+ return;
+
+ switch (crev) {
+ case 1:
+ args.v1.ucVoltageType = voltage_type;
+ args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+ args.v1.ucVoltageIndex = volt_index;
+ break;
+ case 2:
+ args.v2.ucVoltageType = voltage_type;
+ args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+ args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+ break;
+ case 3:
+ args.v3.ucVoltageType = voltage_type;
+ args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
+ args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return;
+ }
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ case 3:
+ args.v3.ucVoltageType = voltage_type;
+ args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+ args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
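+/* The BIOS scratch registers are shared with the VBIOS: the driver uses
+ * them to tell the BIOS which outputs it manages and to save and restore
+ * that state across suspend and VBIOS posts.
+ */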
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_2_scratch, bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600) {
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ } else {
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ }
+
+ /* let the bios control the backlight */
+ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ } else {
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ }
+}
+
+void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
+{
+ uint32_t scratch_reg;
+ int i;
+
+ if (rdev->family >= CHIP_R600)
+ scratch_reg = R600_BIOS_0_SCRATCH;
+ else
+ scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+ for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+ rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
+}
+
+void radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
+{
+ uint32_t scratch_reg;
+ int i;
+
+ if (rdev->family >= CHIP_R600)
+ scratch_reg = R600_BIOS_0_SCRATCH;
+ else
+ scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+ for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+ WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
+}
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600)
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ else
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (lock) {
+ bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+ bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+ } else {
+ bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+ bios_6_scratch |= ATOM_S6_ACC_MODE;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ else
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+ if (rdev->family >= CHIP_R600) {
+ bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+ bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+ bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+ } else {
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+ bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ }
+
+ if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("TV1 connected\n");
+ bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+ } else {
+ DRM_DEBUG_KMS("TV1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+ bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("CV connected\n");
+ bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+ } else {
+ DRM_DEBUG_KMS("CV disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CV_MASK;
+ bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("LCD1 connected\n");
+ bios_0_scratch |= ATOM_S0_LCD1;
+ bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+ } else {
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_LCD1;
+ bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("CRT1 connected\n");
+ bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+ bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+ } else {
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+ bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("CRT2 connected\n");
+ bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+ bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+ } else {
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+ bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP1 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP1;
+ bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+ } else {
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP1;
+ bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP2 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP2;
+ bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+ } else {
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP2;
+ bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP3 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP3;
+ bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+ } else {
+ DRM_DEBUG_KMS("DFP3 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP3;
+ bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP4 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP4;
+ bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+ } else {
+ DRM_DEBUG_KMS("DFP4 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP4;
+ bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP5 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP5;
+ bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+ } else {
+ DRM_DEBUG_KMS("DFP5 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP5;
+ bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP6 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP6;
+ bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+ } else {
+ DRM_DEBUG_KMS("DFP6 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP6;
+ bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+ }
+ }
+
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+ } else {
+ WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ }
+}
+
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_3_scratch;
+
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
+ if (rdev->family >= CHIP_R600)
+ bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+ else
+ bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
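+ /* BIOS_3_SCRATCH carries one CRTC-routing field per device; the
+ * shifts below (16..25) select that device's ATOM_S3_*_CRTC_ACTIVE
+ * bit, as implied by the masks cleared just before each write. */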
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 18);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 24);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 16);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 20);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 17);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 19);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 23);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+ bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+ bios_3_scratch |= (crtc << 25);
+ }
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+ else
+ WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_2_scratch;
+
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
+ if (rdev->family >= CHIP_R600)
+ bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ else
+ bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+ if (on)
+ bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+ else
+ bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+ }
+
+ if (rdev->family >= CHIP_R600)
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ else
+ WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 0000000..d96070b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author: Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for hybrid Intel/ATI graphics platforms
+ */
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/pci.h>
+
+#include "radeon_acpi.h"
+
+struct radeon_atpx_functions {
+ bool px_params;
+ bool power_cntl;
+ bool disp_mux_cntl;
+ bool i2c_mux_cntl;
+ bool switch_start;
+ bool switch_end;
+ bool disp_connectors_mapping;
+ bool disp_detection_ports;
+};
+
+struct radeon_atpx {
+ acpi_handle handle;
+ struct radeon_atpx_functions functions;
+};
+
+static struct radeon_atpx_priv {
+ bool atpx_detected;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ struct radeon_atpx atpx;
+} radeon_atpx_priv;
+
+struct atpx_verify_interface {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 version; /* version */
+ u32 function_bits; /* supported functions bit vector */
+} __packed;
+
+struct atpx_px_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_flags; /* which flags are valid */
+ u32 flags; /* flags */
+} __packed;
+
+struct atpx_power_control {
+ u16 size;
+ u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+ u16 size;
+ u16 mux;
+} __packed;
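+
+/* Each ATPX input buffer begins with a u16 size that includes the
+ * size field itself, so below input.size is 3 for atpx_power_control
+ * (u16 + u8) and 4 for atpx_mux (u16 + u16). */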
+
+/**
+ * radeon_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
+ struct acpi_buffer *params)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2];
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = function;
+
+ if (params) {
+ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atpx_arg_elements[1].buffer.length = params->length;
+ atpx_arg_elements[1].buffer.pointer = params->pointer;
+ } else {
+ /* We need a second fake parameter */
+ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+
+ /* Fail only if calling the method fails and ATPX is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk("failed to evaluate ATPX got %s\n",
+ acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
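+
+/* Illustrative only (a minimal sketch, not code from this driver):
+ * every ATPX helper below follows the same call-and-free pattern,
+ * roughly:
+ *
+ *	union acpi_object *info;
+ *
+ *	info = radeon_atpx_call(handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+ *	if (!info)
+ *		return -EIO;
+ *	(parse info->buffer.pointer here)
+ *	kfree(info);
+ */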
+
+/**
+ * radeon_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
+{
+ f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+ f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+ f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+ f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+ f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+ f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+ f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
+
+/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+ size_t size;
+ u32 valid_bits;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ kfree(info);
+ return -EINVAL;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ valid_bits = output.flags & output.valid_flags;
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_verify_interface - verify ATPX
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+{
+ union acpi_object *info;
+ struct atpx_verify_interface output;
+ size_t size;
+ int err = 0;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 8) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ /* TODO: check version? */
+ printk("ATPX version %u\n", output.version);
+
+ radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
+
+out:
+ kfree(info);
+ return err;
+}
+
+/**
+ * radeon_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+{
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_power_control input;
+
+ if (atpx->functions.power_cntl) {
+ input.size = 3;
+ input.dgpu_state = state;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_POWER_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.disp_mux_cntl) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.i2c_mux_cntl) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_I2C_MUX_CONTROL,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
+{
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.switch_start) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
+{
+ struct acpi_buffer params;
+ union acpi_object *info;
+ struct atpx_mux input;
+
+ if (atpx->functions.switch_end) {
+ input.size = 4;
+ input.mux = mux_id;
+ params.length = input.size;
+ params.pointer = &input;
+ info = radeon_atpx_call(atpx->handle,
+ ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+ &params);
+ if (!info)
+ return -EIO;
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
+ * radeon_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+ u16 gpu_id;
+
+ if (id == VGA_SWITCHEROO_IGD)
+ gpu_id = ATPX_INTEGRATED_GPU;
+ else
+ gpu_id = ATPX_DISCRETE_GPU;
+
+ radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
+ radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
+
+ return 0;
+}
+
+/**
+ * radeon_atpx_power_state - power down/up the requested GPU
+ *
+ * @id: GPU to power down/up
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ /* On the W500, ACPI can't change the Intel GPU's power state */
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
+ return 0;
+}
+
+/**
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATPX handles (all asics).
+ * Returns true if the handles are found, false if not.
+ */
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, atpx_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx.handle = atpx_handle;
+ return true;
+}
+
+/**
+ * radeon_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_init(void)
+{
+ int r;
+
+ /* set up the ATPX handle */
+ r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ /* validate the atpx setup */
+ r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * radeon_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * Look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
+static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler radeon_atpx_handler = {
+ .switchto = radeon_atpx_switchto,
+ .power_state = radeon_atpx_power_state,
+ .init = radeon_atpx_init,
+ .get_client_id = radeon_atpx_get_client_id,
+};
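+
+/* With this handler registered, vga_switcheroo drives the callbacks
+ * above; a switch is typically requested from userspace via debugfs,
+ * e.g. "echo DIS > /sys/kernel/debug/vgaswitcheroo/switch" (path per
+ * the vga_switcheroo interface, not defined in this file). */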
+
+/**
+ * radeon_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
+static bool radeon_atpx_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= radeon_atpx_pci_probe_handle(pdev);
+ }
+
+ if (has_atpx && vga_count == 2) {
+ acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_register_atpx_handler(void)
+{
+ bool r;
+
+ /* detect if we have any ATPX + 2 VGA in the system */
+ r = radeon_atpx_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&radeon_atpx_handler);
+}
+
+/**
+ * radeon_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_unregister_atpx_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 0000000..6e05a2e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_BENCHMARK_COPY_BLIT 1
+#define RADEON_BENCHMARK_COPY_DMA 0
+
+#define RADEON_BENCHMARK_ITERATIONS 1024
+#define RADEON_BENCHMARK_COMMON_MODES_N 17
+
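+/* Returns the elapsed time in ms for n copies of size bytes, or a
+ * negative errno on failure; callers treat 0 ms as too fast to
+ * measure and skip logging. */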
+static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+ uint64_t saddr, uint64_t daddr,
+ int flag, int n)
+{
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+ struct radeon_fence *fence = NULL;
+ int i, r;
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ switch (flag) {
+ case RADEON_BENCHMARK_COPY_DMA:
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ &fence);
+ break;
+ case RADEON_BENCHMARK_COPY_BLIT:
+ r = radeon_copy_blit(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ &fence);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ r = -EINVAL;
+ }
+ if (r)
+ goto exit_do_move;
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ goto exit_do_move;
+ radeon_fence_unref(&fence);
+ }
+ end_jiffies = jiffies;
+ r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+ if (fence)
+ radeon_fence_unref(&fence);
+ return r;
+}
+
+
+static void radeon_benchmark_log_results(int n, unsigned size,
+ unsigned int time,
+ unsigned sdomain, unsigned ddomain,
+ const char *kind)
+{
+ unsigned int throughput = (n * (size >> 10)) / time;
+ DRM_INFO("radeon: %s %u bo moves of %u kB from"
+ " %u to %u in %u ms, throughput: %u Mb/s or %u MB/s\n",
+ kind, n, size >> 10, sdomain, ddomain, time,
+ throughput * 8, throughput);
+}
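+
+/* Worked example: 1024 moves of a 1 MiB buffer in 1000 ms gives
+ * (1024 * 1024 KiB) / 1000 ms = 1048, reported as ~1048 MB/s
+ * (KiB per ms equals MB/s to within binary/decimal rounding). */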
+
+static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ unsigned sdomain, unsigned ddomain)
+{
+ struct radeon_bo *dobj = NULL;
+ struct radeon_bo *sobj = NULL;
+ uint64_t saddr, daddr;
+ int r, n;
+ int time;
+
+ n = RADEON_BENCHMARK_ITERATIONS;
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+ if (r)
+ goto out_cleanup;
+ r = radeon_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(sobj, sdomain, &saddr);
+ radeon_bo_unreserve(sobj);
+ if (r)
+ goto out_cleanup;
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+ if (r)
+ goto out_cleanup;
+ r = radeon_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(dobj, ddomain, &daddr);
+ radeon_bo_unreserve(dobj);
+ if (r)
+ goto out_cleanup;
+
+ if (rdev->asic->copy.dma) {
+ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ RADEON_BENCHMARK_COPY_DMA, n);
+ if (time < 0)
+ goto out_cleanup;
+ if (time > 0)
+ radeon_benchmark_log_results(n, size, time,
+ sdomain, ddomain, "dma");
+ }
+
+ if (rdev->asic->copy.blit) {
+ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ RADEON_BENCHMARK_COPY_BLIT, n);
+ if (time < 0)
+ goto out_cleanup;
+ if (time > 0)
+ radeon_benchmark_log_results(n, size, time,
+ sdomain, ddomain, "blit");
+ }
+
+out_cleanup:
+ if (sobj) {
+ r = radeon_bo_reserve(sobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(sobj);
+ radeon_bo_unreserve(sobj);
+ }
+ radeon_bo_unref(&sobj);
+ }
+ if (dobj) {
+ r = radeon_bo_reserve(dobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(dobj);
+ radeon_bo_unreserve(dobj);
+ }
+ radeon_bo_unref(&dobj);
+ }
+
+ if (r)
+ DRM_ERROR("Error while benchmarking BO move.\n");
+}
+
+void radeon_benchmark(struct radeon_device *rdev, int test_number)
+{
+ int i;
+ int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
+ 640 * 480 * 4,
+ 720 * 480 * 4,
+ 800 * 600 * 4,
+ 848 * 480 * 4,
+ 1024 * 768 * 4,
+ 1152 * 768 * 4,
+ 1280 * 720 * 4,
+ 1280 * 800 * 4,
+ 1280 * 854 * 4,
+ 1280 * 960 * 4,
+ 1280 * 1024 * 4,
+ 1440 * 900 * 4,
+ 1400 * 1050 * 4,
+ 1680 * 1050 * 4,
+ 1600 * 1200 * 4,
+ 1920 * 1080 * 4,
+ 1920 * 1200 * 4
+ };
+
+ switch (test_number) {
+ case 1:
+ /* simple test, VRAM to GTT and GTT to VRAM */
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 2:
+ /* simple test, VRAM to VRAM */
+ radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 3:
+ /* GTT to VRAM, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 4:
+ /* VRAM to GTT, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 5:
+ /* VRAM to VRAM, buffer size sweep, powers of 2 */
+ for (i = 1; i <= 16384; i <<= 1)
+ radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 6:
+ /* GTT to VRAM, buffer size sweep, common modes */
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+ case 7:
+ /* VRAM to GTT, buffer size sweep, common modes */
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_GTT);
+ break;
+ case 8:
+ /* VRAM to VRAM, buffer size sweep, common modes */
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ radeon_benchmark_move(rdev, common_modes[i],
+ RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM);
+ break;
+
+ default:
+ DRM_ERROR("Unknown benchmark\n");
+ }
+}
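+
+/* The test number is normally supplied at module load time; in this
+ * driver it is expected to be wired to the radeon.benchmark= module
+ * parameter (assumption based on the surrounding driver code). */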
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 0000000..973e5ba
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+/*
+ * BIOS.
+ */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios. On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ resource_size_t vram_base;
+ resource_size_t size = 256 * 1024; /* ??? */
+
+ if (!(rdev->flags & RADEON_IS_IGP))
+ if (!radeon_card_posted(rdev))
+ return false;
+
+ rdev->bios = NULL;
+ vram_base = pci_resource_start(rdev->pdev, 0);
+ bios = ioremap(vram_base, size);
+ if (!bios) {
+ return false;
+ }
+
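+ /* 0x55 0xaa is the standard PCI expansion ROM header signature */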
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ iounmap(bios);
+ return false;
+ }
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ iounmap(bios);
+ return false;
+ }
+ memcpy_fromio(rdev->bios, bios, size);
+ iounmap(bios);
+ return true;
+}
+
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ size_t size;
+
+ rdev->bios = NULL;
+ /* XXX: some cards may return 0 for rom size? ddx has a workaround */
+ bios = pci_map_rom(rdev->pdev, &size);
+ if (!bios) {
+ return false;
+ }
+
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ pci_unmap_rom(rdev->pdev, bios);
+ return false;
+ }
+ pci_unmap_rom(rdev->pdev, bios);
+ return true;
+}
+
+static bool radeon_read_platform_bios(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ size_t size;
+
+ rdev->bios = NULL;
+
+ bios = pci_platform_rom(rdev->pdev, &size);
+ if (!bios) {
+ return false;
+ }
+
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ return false;
+ }
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object atrm_arg_elements[2], *obj;
+ struct acpi_object_list atrm_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atrm_arg.count = 2;
+ atrm_arg.pointer = &atrm_arg_elements[0];
+
+ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[0].integer.value = offset;
+
+ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+ len = obj->buffer.length;
+ kfree(buffer.pointer);
+ return len;
+}
+
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ int ret;
+ int size = 256 * 1024;
+ int i;
+ struct pci_dev *pdev = NULL;
+ acpi_handle dhandle, atrm_handle;
+ acpi_status status;
+ bool found = false;
+
+ /* ATRM is for the discrete card only */
+ if (rdev->flags & RADEON_IS_IGP)
+ return false;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (ACPI_SUCCESS(status)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+ DRM_ERROR("Unable to allocate bios\n");
+ return false;
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+ ret = radeon_atrm_call(atrm_handle,
+ rdev->bios,
+ (i * ATRM_BIOS_PAGE),
+ ATRM_BIOS_PAGE);
+ if (ret < ATRM_BIOS_PAGE)
+ break;
+ }
+
+ if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ return false;
+ }
+ return true;
+}
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ return false;
+}
+#endif
+
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t rom_cntl;
+ uint32_t cg_spll_func_cntl = 0;
+ uint32_t cg_spll_status;
+ bool r;
+
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ if (rdev->family == CHIP_RV730) {
+ cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+ /* enable bypass mode */
+ WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+ R600_SPLL_BYPASS_EN));
+
+ /* wait for SPLL_CHG_STATUS to change to 1 */
+ cg_spll_status = 0;
+ while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+ cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+ WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+ } else
+ WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ if (rdev->family == CHIP_RV730) {
+ WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+ /* wait for SPLL_CHG_STATUS to change to 1 */
+ cg_spll_status = 0;
+ while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+ cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+ }
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t rom_cntl;
+ uint32_t general_pwrmgt;
+ uint32_t low_vid_lower_gpio_cntl;
+ uint32_t medium_vid_lower_gpio_cntl;
+ uint32_t high_vid_lower_gpio_cntl;
+ uint32_t ctxsw_vid_lower_gpio_cntl;
+ uint32_t lower_gpio_enable;
+ bool r;
+
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+ general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+ low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+ medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+ high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+ ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+ lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ WREG32(R600_ROM_CNTL,
+ ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+ (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+ R600_SCK_OVERWRITE));
+
+ WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+ WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+ (low_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+ (medium_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+ (high_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+ (ctxsw_vid_lower_gpio_cntl & ~0x400));
+ WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+ WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+ WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+ WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+ WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+ WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+ return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t seprom_cntl1;
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t d1vga_control;
+ uint32_t d2vga_control;
+ uint32_t vga_render_control;
+ uint32_t gpiopad_a;
+ uint32_t gpiopad_en;
+ uint32_t gpiopad_mask;
+ bool r;
+
+ seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+ gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+ WREG32(RADEON_SEPROM_CNTL1,
+ ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+ (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+ WREG32(RADEON_GPIOPAD_A, 0);
+ WREG32(RADEON_GPIOPAD_EN, 0);
+ WREG32(RADEON_GPIOPAD_MASK, 0);
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+ /* enable the rom */
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+ WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+ WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+ return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+ uint32_t seprom_cntl1;
+ uint32_t viph_control;
+ uint32_t bus_cntl;
+ uint32_t crtc_gen_cntl;
+ uint32_t crtc2_gen_cntl;
+ uint32_t crtc_ext_cntl;
+ uint32_t fp2_gen_cntl;
+ bool r;
+
+ seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+ viph_control = RREG32(RADEON_VIPH_CONTROL);
+ if (rdev->flags & RADEON_IS_PCIE)
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ else
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+ crtc2_gen_cntl = 0;
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ fp2_gen_cntl = 0;
+
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ }
+
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ }
+
+ WREG32(RADEON_SEPROM_CNTL1,
+ ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+ (0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+ /* disable VIP */
+ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+ /* enable the rom */
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+ else
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+ /* Turn off mem requests and CRTC for both controllers */
+ WREG32(RADEON_CRTC_GEN_CNTL,
+ ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+ (RADEON_CRTC_DISP_REQ_EN_B |
+ RADEON_CRTC_EXT_DISP_EN)));
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(RADEON_CRTC2_GEN_CNTL,
+ ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+ RADEON_CRTC2_DISP_REQ_EN_B));
+ }
+ /* Turn off CRTC */
+ WREG32(RADEON_CRTC_EXT_CNTL,
+ ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+ (RADEON_CRTC_SYNC_TRISTAT |
+ RADEON_CRTC_DISPLAY_DIS)));
+
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+ }
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+ WREG32(RADEON_VIPH_CONTROL, viph_control);
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ else
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ }
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ }
+ return r;
+}
+
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+ if (rdev->flags & RADEON_IS_IGP)
+ return igp_read_bios_from_vram(rdev);
+ else if (rdev->family >= CHIP_BARTS)
+ return ni_read_disabled_bios(rdev);
+ else if (rdev->family >= CHIP_RV770)
+ return r700_read_disabled_bios(rdev);
+ else if (rdev->family >= CHIP_R600)
+ return r600_read_disabled_bios(rdev);
+ else if (rdev->family >= CHIP_RS600)
+ return avivo_read_disabled_bios(rdev);
+ else
+ return legacy_read_disabled_bios(rdev);
+}
+
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+ bool ret = false;
+ struct acpi_table_header *hdr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+ acpi_size tbl_size;
+#else
+ /* acpi_get_table_with_size() not exported on kernels < 3.6 */
+ acpi_size tbl_size = 0x7fffffff;
+#endif
+ UEFI_ACPI_VFCT *vfct;
+ GOP_VBIOS_CONTENT *vbios;
+ VFCT_IMAGE_HEADER *vhdr;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
+ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+#else
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+#endif
+ return false;
+ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+ goto out_unmap;
+ }
+
+ vfct = (UEFI_ACPI_VFCT *)hdr;
+ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ goto out_unmap;
+ }
+
+ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+ vhdr = &vbios->VbiosHeader;
+ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+ if (vhdr->PCIBus != rdev->pdev->bus->number ||
+ vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+ vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+ vhdr->VendorID != rdev->pdev->vendor ||
+ vhdr->DeviceID != rdev->pdev->device) {
+ DRM_INFO("ACPI VFCT table is not for this card\n");
+ goto out_unmap;
+ }
+
+ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+ goto out_unmap;
+ }
+
+ rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+ ret = !!rdev->bios;
+
+out_unmap:
+ return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+ return false;
+}
+#endif
+
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+ bool r;
+ uint16_t tmp;
+
+ r = radeon_atrm_get_bios(rdev);
+ if (!r)
+ r = radeon_acpi_vfct_bios(rdev);
+ if (!r)
+ r = igp_read_bios_from_vram(rdev);
+ if (!r)
+ r = radeon_read_bios(rdev);
+ if (!r)
+ r = radeon_read_disabled_bios(rdev);
+ if (!r)
+ r = radeon_read_platform_bios(rdev);
+ if (!r || rdev->bios == NULL) {
+ DRM_ERROR("Unable to locate a BIOS ROM\n");
+ rdev->bios = NULL;
+ return false;
+ }
+ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+ goto free_bios;
+ }
+
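+ /* The word at 0x18 in the ROM points to the PCI Data Structure;
+ * byte 0x14 of that structure is the code type (0 = x86 BIOS). */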
+ tmp = RBIOS16(0x18);
+ if (RBIOS8(tmp + 0x14) != 0x0) {
+ DRM_INFO("Not an x86 BIOS ROM, not using.\n");
+ goto free_bios;
+ }
+
+ rdev->bios_header_start = RBIOS16(0x48);
+ if (!rdev->bios_header_start) {
+ goto free_bios;
+ }
+ tmp = rdev->bios_header_start + 4;
+ if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+ !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+ rdev->is_atom_bios = true;
+ } else {
+ rdev->is_atom_bios = false;
+ }
+
+ DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+ return true;
+free_bios:
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
new file mode 100644
index 0000000..4ecbe72
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_blit_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Alcatel-Lucent, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RADEON_BLIT_COMMON_H__
+#define __RADEON_BLIT_COMMON_H__
+
+#define DI_PT_RECTLIST 0x11
+#define DI_INDEX_SIZE_16_BIT 0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8 0x1
+#define FMT_5_6_5 0x8
+#define FMT_8_8_8_8 0x1a
+#define COLOR_8 0x1
+#define COLOR_5_6_5 0x8
+#define COLOR_8_8_8_8 0x1a
+
+#define RECT_UNIT_H 32
+#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
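+/* With the usual 4096-byte GPU page and 4 bytes per pixel this is
+ * 4096 / 4 / 32 = 32, so one RECT_UNIT_W x RECT_UNIT_H tile covers
+ * exactly one GPU page of 32bpp pixels (page size per radeon.h). */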
+
+#endif /* __RADEON_BLIT_COMMON_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 0000000..38e396d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/* Returns the engine clock in 10 kHz units */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ uint32_t fb_div, ref_div, post_div, sclk;
+
+ fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+ fb_div <<= 1;
+ fb_div *= spll->reference_freq;
+
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
+ sclk = fb_div / ref_div;
+
+ post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+ if (post_div == 2)
+ sclk >>= 1;
+ else if (post_div == 3)
+ sclk >>= 2;
+ else if (post_div == 4)
+ sclk >>= 3;
+
+ return sclk;
+}
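+
+/* Worked example: with spll->reference_freq = 2700 (27 MHz in 10 kHz
+ * units) and a raw feedback divider of 32, fb_div = 64 * 2700 =
+ * 172800; ref_div = 4 gives sclk = 43200 (432 MHz), and a post-divider
+ * select of 2 halves that to 21600, i.e. a 216 MHz engine clock. */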
+
+/* Returns the memory clock in 10 kHz units */
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ uint32_t fb_div, ref_div, post_div, mclk;
+
+ fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+ fb_div <<= 1;
+ fb_div *= mpll->reference_freq;
+
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
+ mclk = fb_div / ref_div;
+
+ post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+ if (post_div == 2)
+ mclk >>= 1;
+ else if (post_div == 3)
+ mclk >>= 2;
+ else if (post_div == 4)
+ mclk >>= 3;
+
+ return mclk;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Read the XTAL (reference clock), SCLK and MCLK values from the Open
+ * Firmware device tree. Hopefully the ATI OF driver was kind enough
+ * to fill these in.
+ */
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct device_node *dp = rdev->pdev->dev.of_node;
+ const u32 *val;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+
+ if (dp == NULL)
+ return false;
+ val = of_get_property(dp, "ATY,RefCLK", NULL);
+ if (!val || !*val) {
+ printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
+ return false;
+ }
+ p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+ p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ p2pll->reference_div = p1pll->reference_div;
+
+ /* These aren't in the device-tree */
+ if (rdev->family >= CHIP_R420) {
+ p1pll->pll_in_min = 100;
+ p1pll->pll_in_max = 1350;
+ p1pll->pll_out_min = 20000;
+ p1pll->pll_out_max = 50000;
+ p2pll->pll_in_min = 100;
+ p2pll->pll_in_max = 1350;
+ p2pll->pll_out_min = 20000;
+ p2pll->pll_out_max = 50000;
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ p1pll->pll_out_min = 12500;
+ p1pll->pll_out_max = 35000;
+ p2pll->pll_in_min = 40;
+ p2pll->pll_in_max = 500;
+ p2pll->pll_out_min = 12500;
+ p2pll->pll_out_max = 35000;
+ }
+ /* not sure what the max should be in all cases */
+ rdev->clock.max_pixel_clock = 35000;
+
+ spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+ spll->reference_div = mpll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+
+ val = of_get_property(dp, "ATY,SCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_sclk = (*val) / 10;
+ else
+ rdev->clock.default_sclk =
+ radeon_legacy_get_engine_clock(rdev);
+
+ val = of_get_property(dp, "ATY,MCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_mclk = (*val) / 10;
+ else
+ rdev->clock.default_mclk =
+ radeon_legacy_get_memory_clock(rdev);
+
+ DRM_INFO("Using device-tree clock info\n");
+
+ return true;
+}
+#else
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_OF */
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ int ret;
+
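+ /* try the vbios (ATOM or COMBIOS) first, then the OF device tree, and finally generic defaults */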
+ if (rdev->is_atom_bios)
+ ret = radeon_atom_get_clock_info(dev);
+ else
+ ret = radeon_combios_get_clock_info(dev);
+ if (!ret)
+ ret = radeon_read_clocks_OF(dev);
+
+ if (ret) {
+ if (p1pll->reference_div < 2) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+ if (ASIC_IS_R300(rdev))
+ p1pll->reference_div =
+ (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+ else
+ p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ } else
+ p1pll->reference_div = 12;
+ }
+ if (p2pll->reference_div < 2)
+ p2pll->reference_div = 12;
+ if (rdev->family < CHIP_RS600) {
+ if (spll->reference_div < 2)
+ spll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+ }
+ if (mpll->reference_div < 2)
+ mpll->reference_div = spll->reference_div;
+ } else {
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* TODO FALLBACK */
+ } else {
+ DRM_INFO("Using generic clock info\n");
+
+ /* may need to be per card */
+ rdev->clock.max_pixel_clock = 35000;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ p1pll->reference_freq = 1432;
+ p2pll->reference_freq = 1432;
+ spll->reference_freq = 1432;
+ mpll->reference_freq = 1432;
+ } else {
+ p1pll->reference_freq = 2700;
+ p2pll->reference_freq = 2700;
+ spll->reference_freq = 2700;
+ mpll->reference_freq = 2700;
+ }
+ p1pll->reference_div =
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ p2pll->reference_div = p1pll->reference_div;
+
+ if (rdev->family >= CHIP_R420) {
+ p1pll->pll_in_min = 100;
+ p1pll->pll_in_max = 1350;
+ p1pll->pll_out_min = 20000;
+ p1pll->pll_out_max = 50000;
+ p2pll->pll_in_min = 100;
+ p2pll->pll_in_max = 1350;
+ p2pll->pll_out_min = 20000;
+ p2pll->pll_out_max = 50000;
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ p1pll->pll_out_min = 12500;
+ p1pll->pll_out_max = 35000;
+ p2pll->pll_in_min = 40;
+ p2pll->pll_in_max = 500;
+ p2pll->pll_out_min = 12500;
+ p2pll->pll_out_max = 35000;
+ }
+
+ spll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+ mpll->reference_div = spll->reference_div;
+ rdev->clock.default_sclk =
+ radeon_legacy_get_engine_clock(rdev);
+ rdev->clock.default_mclk =
+ radeon_legacy_get_memory_clock(rdev);
+ }
+ }
+
+ /* pixel clocks */
+ if (ASIC_IS_AVIVO(rdev)) {
+ p1pll->min_post_div = 2;
+ p1pll->max_post_div = 0x7f;
+ p1pll->min_frac_feedback_div = 0;
+ p1pll->max_frac_feedback_div = 9;
+ p2pll->min_post_div = 2;
+ p2pll->max_post_div = 0x7f;
+ p2pll->min_frac_feedback_div = 0;
+ p2pll->max_frac_feedback_div = 9;
+ } else {
+ p1pll->min_post_div = 1;
+ p1pll->max_post_div = 16;
+ p1pll->min_frac_feedback_div = 0;
+ p1pll->max_frac_feedback_div = 0;
+ p2pll->min_post_div = 1;
+ p2pll->max_post_div = 12;
+ p2pll->min_frac_feedback_div = 0;
+ p2pll->max_frac_feedback_div = 0;
+ }
+
+ /* dcpll is DCE4 only */
+ dcpll->min_post_div = 2;
+ dcpll->max_post_div = 0x7f;
+ dcpll->min_frac_feedback_div = 0;
+ dcpll->max_frac_feedback_div = 9;
+ dcpll->min_ref_div = 2;
+ dcpll->max_ref_div = 0x3ff;
+ dcpll->min_feedback_div = 4;
+ dcpll->max_feedback_div = 0xfff;
+ dcpll->best_vco = 0;
+
+ p1pll->min_ref_div = 2;
+ p1pll->max_ref_div = 0x3ff;
+ p1pll->min_feedback_div = 4;
+ p1pll->max_feedback_div = 0x7ff;
+ p1pll->best_vco = 0;
+
+ p2pll->min_ref_div = 2;
+ p2pll->max_ref_div = 0x3ff;
+ p2pll->min_feedback_div = 4;
+ p2pll->max_feedback_div = 0x7ff;
+ p2pll->best_vco = 0;
+
+ /* system clock */
+ spll->min_post_div = 1;
+ spll->max_post_div = 1;
+ spll->min_ref_div = 2;
+ spll->max_ref_div = 0xff;
+ spll->min_feedback_div = 4;
+ spll->max_feedback_div = 0xff;
+ spll->best_vco = 0;
+
+ /* memory clock */
+ mpll->min_post_div = 1;
+ mpll->max_post_div = 1;
+ mpll->min_ref_div = 2;
+ mpll->max_ref_div = 0xff;
+ mpll->min_feedback_div = 4;
+ mpll->max_feedback_div = 0xff;
+ mpll->best_vco = 0;
+
+ if (!rdev->clock.default_sclk)
+ rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+ if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+ rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+
+}
+
+/* req_clock and the returned clock are in 10 kHz units */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+ uint32_t req_clock,
+ int *fb_div, int *post_div)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ int ref_div = spll->reference_div;
+
+ if (!ref_div)
+ ref_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (req_clock < 15000) {
+ *post_div = 8;
+ req_clock *= 8;
+ } else if (req_clock < 30000) {
+ *post_div = 4;
+ req_clock *= 4;
+ } else if (req_clock < 60000) {
+ *post_div = 2;
+ req_clock *= 2;
+ } else
+ *post_div = 1;
+
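+ /* fb_div = req_clock * ref_div / (2 * ref_freq), rounded to nearest: adding ref_freq is half of the divisor */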
+ req_clock *= ref_div;
+ req_clock += spll->reference_freq;
+ req_clock /= (2 * spll->reference_freq);
+
+ *fb_div = req_clock & 0xff;
+
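+ /* recompute the clock the chosen dividers will actually produce */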
+ req_clock = (req_clock & 0xffff) << 1;
+ req_clock *= spll->reference_freq;
+ req_clock /= ref_div;
+ req_clock /= *post_div;
+
+ return req_clock;
+}
+
+/* eng_clock is in 10 kHz units */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+ uint32_t eng_clock)
+{
+ uint32_t tmp;
+ int fb_div, post_div;
+
+ /* XXX: wait for idle */
+
+ eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp &= ~RADEON_DONT_USE_XTALIN;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ udelay(10);
+
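+ /* put the SPLL to sleep and hold it in reset while the feedback divider is reprogrammed */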
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp |= RADEON_SPLL_SLEEP;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(2);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp |= RADEON_SPLL_RESET;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(200);
+
+ tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+ tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+ tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+ WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+ /* XXX: verify on different asics */
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_PVG_MASK;
+ if ((eng_clock * post_div) >= 90000)
+ tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+ else
+ tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_SLEEP;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(2);
+
+ tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+ tmp &= ~RADEON_SPLL_RESET;
+ WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+ udelay(200);
+
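+ /* route SCLK back to the PLL output with the source select matching the post divider */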
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+ switch (post_div) {
+ case 1:
+ default:
+ tmp |= 1;
+ break;
+ case 2:
+ tmp |= 2;
+ break;
+ case 4:
+ tmp |= 3;
+ break;
+ case 8:
+ tmp |= 4;
+ break;
+ }
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ udelay(20);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp |= RADEON_DONT_USE_XTALIN;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+ udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+ uint32_t tmp;
+
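+ /* enable: clear the FORCE bits so idle blocks can be gated dynamically; disable: force every clock on */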
+ if (enable) {
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ if ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) >
+ RADEON_CFG_ATI_REV_A13) {
+ tmp &=
+ ~(RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_RB);
+ }
+ tmp &=
+ ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+ RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+ RADEON_SCLK_FORCE_TDM);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if (ASIC_IS_R300(rdev)) {
+ if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &=
+ ~(RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+ | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+ | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+ | R300_SCLK_FORCE_US |
+ RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU |
+ RADEON_SCLK_FORCE_OV0);
+ tmp |= RADEON_DYN_STOP_LAT_MASK;
+ tmp |=
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_VIP;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else if (rdev->family >= CHIP_RV350) {
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp &= ~(R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+ R300_SCLK_GA_MAX_DYN_STOP_LAT |
+ R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &=
+ ~(RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+ | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+ | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+ | R300_SCLK_FORCE_US |
+ RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU |
+ RADEON_SCLK_FORCE_OV0);
+ tmp |= RADEON_DYN_STOP_LAT_MASK;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+ tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_MISC);
+ tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+ RADEON_IO_MCLK_DYN_ENABLE);
+ WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp |= (RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB);
+
+ tmp &= ~(RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC);
+
+ /* Some vbios releases set the DISABLE_MC_MCLKA and
+ DISABLE_MC_MCLKB bits in the vbios table. Setting these
+ bits causes a hardware hang when video memory is read
+ with dynamic clocking enabled. */
+ if ((tmp & R300_DISABLE_MC_MCLKA) &&
+ (tmp & R300_DISABLE_MC_MCLKB)) {
+ /* If both bits are set, then check the active channels */
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ if (rdev->mc.vram_width == 64) {
+ if (RREG32(RADEON_MEM_CNTL) &
+ R300_MEM_USE_CD_CH_ONLY)
+ tmp &=
+ ~R300_DISABLE_MC_MCLKB;
+ else
+ tmp &=
+ ~R300_DISABLE_MC_MCLKA;
+ } else {
+ tmp &= ~(R300_DISABLE_MC_MCLKA |
+ R300_DISABLE_MC_MCLKB);
+ }
+ }
+
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+ } else {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp &= ~(R300_SCLK_FORCE_VAP);
+ tmp |= RADEON_SCLK_FORCE_CP;
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ mdelay(15);
+
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp &= ~(R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+ }
+ } else {
+ tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+ tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+ RADEON_DISP_DYN_STOP_LAT_MASK |
+ RADEON_DYN_STOP_MODE_MASK);
+
+ tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+ (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+ WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+ mdelay(15);
+
+ tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ tmp |= RADEON_SCLK_DYN_START_CNTL;
+ WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+ mdelay(15);
+
+ /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause
+ some R200 chips to lock up randomly; leave it as set by the
+ BIOS. */
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ /*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+ tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+ /* RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+ if (((rdev->family == CHIP_RV250) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13))
+ || ((rdev->family == CHIP_RV100)
+ &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <=
+ RADEON_CFG_ATI_REV_A13))) {
+ tmp |= RADEON_SCLK_FORCE_CP;
+ tmp |= RADEON_SCLK_FORCE_VIP;
+ }
+
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ if ((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280)) {
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+ /* RV200::A11 A12 RV250::A11 A12 */
+ if (((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250)) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13)) {
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ }
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+ mdelay(15);
+ }
+
+ /* RV200::A11 A12, RV250::A11 A12 */
+ if (((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250)) &&
+ ((RREG32(RADEON_CONFIG_CNTL) &
+ RADEON_CFG_ATI_REV_ID_MASK) <
+ RADEON_CFG_ATI_REV_A13)) {
+ tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+ tmp |= RADEON_TCL_BYPASS_DISABLE;
+ WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+ }
+ mdelay(15);
+
+ /* enable dynamic mode for the display clocks (PIXCLK and PIX2CLK) */
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ mdelay(15);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+ mdelay(15);
+ }
+ } else {
+ /* Turn everything OFF (ForceON to everything) */
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+ | RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+ RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+ RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+ RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+ RADEON_SCLK_FORCE_RB);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ } else if ((rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+ | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else if (rdev->family >= CHIP_RV350) {
+ /* for RV350/M10, no delays are required. */
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp |= (R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+ RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+ | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+ R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+ R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+ R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+ R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp |= (RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+ R300_DVOCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ R300_PIXCLK_DVO_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+ R300_PIXCLK_TRANS_ALWAYS_ONb |
+ R300_PIXCLK_TVO_ALWAYS_ONb |
+ R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
+ R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ } else {
+ tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+ tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+ tmp |= RADEON_SCLK_FORCE_SE;
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp |= (RADEON_SCLK_FORCE_RB |
+ RADEON_SCLK_FORCE_TDM |
+ RADEON_SCLK_FORCE_TAM |
+ RADEON_SCLK_FORCE_PB |
+ RADEON_SCLK_FORCE_RE |
+ RADEON_SCLK_FORCE_VIP |
+ RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_HDP);
+ } else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350)) {
+ tmp |= (RADEON_SCLK_FORCE_HDP |
+ RADEON_SCLK_FORCE_DISP1 |
+ RADEON_SCLK_FORCE_DISP2 |
+ RADEON_SCLK_FORCE_TOP |
+ RADEON_SCLK_FORCE_IDCT |
+ RADEON_SCLK_FORCE_VIP);
+ }
+ WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+ mdelay(16);
+
+ if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350)) {
+ tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ tmp |= (R300_SCLK_FORCE_TCL |
+ R300_SCLK_FORCE_GA |
+ R300_SCLK_FORCE_CBA);
+ WREG32_PLL(R300_SCLK_CNTL2, tmp);
+ mdelay(16);
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+ tmp &= ~(RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_YCLKA);
+ WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+ mdelay(16);
+ }
+
+ if ((rdev->family == CHIP_RV200) ||
+ (rdev->family == CHIP_RV250) ||
+ (rdev->family == CHIP_RV280)) {
+ tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ tmp |= RADEON_SCLK_MORE_FORCEON;
+ WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+ mdelay(16);
+ }
+
+ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+ RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+ RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+ RADEON_PIXCLK_GV_ALWAYS_ONb |
+ RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+ RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+ RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+ mdelay(16);
+
+ tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+ RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 0000000..68ce360
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,3619 @@
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_PMAC */
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+ uint32_t supported_device);
+
+/* legacy COMBIOS (pre-ATOM) ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+ /* absolute offset tables */
+ COMBIOS_ASIC_INIT_1_TABLE,
+ COMBIOS_BIOS_SUPPORT_TABLE,
+ COMBIOS_DAC_PROGRAMMING_TABLE,
+ COMBIOS_MAX_COLOR_DEPTH_TABLE,
+ COMBIOS_CRTC_INFO_TABLE,
+ COMBIOS_PLL_INFO_TABLE,
+ COMBIOS_TV_INFO_TABLE,
+ COMBIOS_DFP_INFO_TABLE,
+ COMBIOS_HW_CONFIG_INFO_TABLE,
+ COMBIOS_MULTIMEDIA_INFO_TABLE,
+ COMBIOS_TV_STD_PATCH_TABLE,
+ COMBIOS_LCD_INFO_TABLE,
+ COMBIOS_MOBILE_INFO_TABLE,
+ COMBIOS_PLL_INIT_TABLE,
+ COMBIOS_MEM_CONFIG_TABLE,
+ COMBIOS_SAVE_MASK_TABLE,
+ COMBIOS_HARDCODED_EDID_TABLE,
+ COMBIOS_ASIC_INIT_2_TABLE,
+ COMBIOS_CONNECTOR_INFO_TABLE,
+ COMBIOS_DYN_CLK_1_TABLE,
+ COMBIOS_RESERVED_MEM_TABLE,
+ COMBIOS_EXT_TMDS_INFO_TABLE,
+ COMBIOS_MEM_CLK_INFO_TABLE,
+ COMBIOS_EXT_DAC_INFO_TABLE,
+ COMBIOS_MISC_INFO_TABLE,
+ COMBIOS_CRT_INFO_TABLE,
+ COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+ COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+ COMBIOS_FAN_SPEED_INFO_TABLE,
+ COMBIOS_OVERDRIVE_INFO_TABLE,
+ COMBIOS_OEM_INFO_TABLE,
+ COMBIOS_DYN_CLK_2_TABLE,
+ COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+ COMBIOS_I2C_INFO_TABLE,
+ /* relative offset tables */
+ COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */
+ COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */
+ COMBIOS_DETECTED_MEM_TABLE, /* offset from misc info */
+ COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */
+ COMBIOS_RAM_RESET_TABLE, /* offset from mem config */
+ COMBIOS_POWERPLAY_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */
+ COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */
+ COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */
+ COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+ DDC_NONE_DETECTED,
+ DDC_MONID,
+ DDC_DVI,
+ DDC_VGA,
+ DDC_CRT2,
+ DDC_LCD,
+ DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+ CONNECTOR_NONE_LEGACY,
+ CONNECTOR_PROPRIETARY_LEGACY,
+ CONNECTOR_CRT_LEGACY,
+ CONNECTOR_DVI_I_LEGACY,
+ CONNECTOR_DVI_D_LEGACY,
+ CONNECTOR_CTV_LEGACY,
+ CONNECTOR_STV_LEGACY,
+ CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+const int legacy_connector_convert[] = {
+ DRM_MODE_CONNECTOR_Unknown,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_VGA,
+ DRM_MODE_CONNECTOR_DVII,
+ DRM_MODE_CONNECTOR_DVID,
+ DRM_MODE_CONNECTOR_Composite,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+ enum radeon_combios_table_offset table)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int rev, size;
+ uint16_t offset = 0, check_offset;
+
+ if (!rdev->bios)
+ return 0;
+
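+ /* absolute tables: check_offset locates a 16-bit pointer relative to the BIOS header; relative tables are found by chasing offsets through other tables */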
+ switch (table) {
+ /* absolute offset tables */
+ case COMBIOS_ASIC_INIT_1_TABLE:
+ check_offset = 0xc;
+ break;
+ case COMBIOS_BIOS_SUPPORT_TABLE:
+ check_offset = 0x14;
+ break;
+ case COMBIOS_DAC_PROGRAMMING_TABLE:
+ check_offset = 0x2a;
+ break;
+ case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+ check_offset = 0x2c;
+ break;
+ case COMBIOS_CRTC_INFO_TABLE:
+ check_offset = 0x2e;
+ break;
+ case COMBIOS_PLL_INFO_TABLE:
+ check_offset = 0x30;
+ break;
+ case COMBIOS_TV_INFO_TABLE:
+ check_offset = 0x32;
+ break;
+ case COMBIOS_DFP_INFO_TABLE:
+ check_offset = 0x34;
+ break;
+ case COMBIOS_HW_CONFIG_INFO_TABLE:
+ check_offset = 0x36;
+ break;
+ case COMBIOS_MULTIMEDIA_INFO_TABLE:
+ check_offset = 0x38;
+ break;
+ case COMBIOS_TV_STD_PATCH_TABLE:
+ check_offset = 0x3e;
+ break;
+ case COMBIOS_LCD_INFO_TABLE:
+ check_offset = 0x40;
+ break;
+ case COMBIOS_MOBILE_INFO_TABLE:
+ check_offset = 0x42;
+ break;
+ case COMBIOS_PLL_INIT_TABLE:
+ check_offset = 0x46;
+ break;
+ case COMBIOS_MEM_CONFIG_TABLE:
+ check_offset = 0x48;
+ break;
+ case COMBIOS_SAVE_MASK_TABLE:
+ check_offset = 0x4a;
+ break;
+ case COMBIOS_HARDCODED_EDID_TABLE:
+ check_offset = 0x4c;
+ break;
+ case COMBIOS_ASIC_INIT_2_TABLE:
+ check_offset = 0x4e;
+ break;
+ case COMBIOS_CONNECTOR_INFO_TABLE:
+ check_offset = 0x50;
+ break;
+ case COMBIOS_DYN_CLK_1_TABLE:
+ check_offset = 0x52;
+ break;
+ case COMBIOS_RESERVED_MEM_TABLE:
+ check_offset = 0x54;
+ break;
+ case COMBIOS_EXT_TMDS_INFO_TABLE:
+ check_offset = 0x58;
+ break;
+ case COMBIOS_MEM_CLK_INFO_TABLE:
+ check_offset = 0x5a;
+ break;
+ case COMBIOS_EXT_DAC_INFO_TABLE:
+ check_offset = 0x5c;
+ break;
+ case COMBIOS_MISC_INFO_TABLE:
+ check_offset = 0x5e;
+ break;
+ case COMBIOS_CRT_INFO_TABLE:
+ check_offset = 0x60;
+ break;
+ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+ check_offset = 0x62;
+ break;
+ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+ check_offset = 0x64;
+ break;
+ case COMBIOS_FAN_SPEED_INFO_TABLE:
+ check_offset = 0x66;
+ break;
+ case COMBIOS_OVERDRIVE_INFO_TABLE:
+ check_offset = 0x68;
+ break;
+ case COMBIOS_OEM_INFO_TABLE:
+ check_offset = 0x6a;
+ break;
+ case COMBIOS_DYN_CLK_2_TABLE:
+ check_offset = 0x6c;
+ break;
+ case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+ check_offset = 0x6e;
+ break;
+ case COMBIOS_I2C_INFO_TABLE:
+ check_offset = 0x70;
+ break;
+ /* relative offset tables */
+ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x3);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x5);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_DETECTED_MEM_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev > 0) {
+ check_offset = RBIOS16(check_offset + 0x7);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+ if (check_offset) {
+ rev = RBIOS8(check_offset);
+ if (rev == 2) {
+ check_offset = RBIOS16(check_offset + 0x9);
+ if (check_offset)
+ offset = check_offset;
+ }
+ }
+ break;
+ case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+ if (check_offset) {
+ while (RBIOS8(check_offset++));
+ check_offset += 2;
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x11);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x13);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x15);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x17);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x2);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */
+ check_offset =
+ combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+ if (check_offset) {
+ check_offset = RBIOS16(check_offset + 0x4);
+ if (check_offset)
+ offset = check_offset;
+ }
+ break;
+ default:
+ check_offset = 0;
+ break;
+ }
+
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);
+
+ return offset;
+}
+
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+ int edid_info, size;
+ struct edid *edid;
+ unsigned char *raw;
+ edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ if (!edid_info)
+ return false;
+
+ raw = rdev->bios + edid_info;
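+ /* byte 0x7e of the base EDID block holds the extension count */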
+ size = EDID_LENGTH * (raw[0x7e] + 1);
+ edid = kmalloc(size, GFP_KERNEL);
+ if (edid == NULL)
+ return false;
+
+ memcpy((unsigned char *)edid, raw, size);
+
+ if (!drm_edid_is_valid(edid)) {
+ kfree(edid);
+ return false;
+ }
+
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ rdev->mode_info.bios_hardcoded_edid_size = size;
+ return true;
+}
+
+/* this is used for atom LCDs as well */
+struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+ struct edid *edid;
+
+ if (rdev->mode_info.bios_hardcoded_edid) {
+ edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((unsigned char *)edid,
+ (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+ rdev->mode_info.bios_hardcoded_edid_size);
+ return edid;
+ }
+ }
+ return NULL;
+}
+
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+ enum radeon_combios_ddc ddc,
+ u32 clk_mask,
+ u32 data_mask)
+{
+ struct radeon_i2c_bus_rec i2c;
+ int ddc_line = 0;
+
+ /* ddc id = mask reg
+ * DDC_NONE_DETECTED = none
+ * DDC_DVI = RADEON_GPIO_DVI_DDC
+ * DDC_VGA = RADEON_GPIO_VGA_DDC
+ * DDC_LCD = RADEON_GPIOPAD_MASK
+ * DDC_GPIO = RADEON_MDGPIO_MASK
+ * r1xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_CRT2_DDC
+ * r200
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+ * r300/r350
+ * DDC_MONID = RADEON_GPIO_DVI_DDC
+ * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+ * rv2xx/rv3xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_MONID
+ * rs3xx/rs4xx
+ * DDC_MONID = RADEON_GPIOPAD_MASK
+ * DDC_CRT2 = RADEON_GPIO_MONID
+ */
+ switch (ddc) {
+ case DDC_NONE_DETECTED:
+ default:
+ ddc_line = 0;
+ break;
+ case DDC_DVI:
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ break;
+ case DDC_VGA:
+ ddc_line = RADEON_GPIO_VGA_DDC;
+ break;
+ case DDC_LCD:
+ ddc_line = RADEON_GPIOPAD_MASK;
+ break;
+ case DDC_GPIO:
+ ddc_line = RADEON_MDGPIO_MASK;
+ break;
+ case DDC_MONID:
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIOPAD_MASK;
+ else if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350) {
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ ddc = DDC_DVI;
+ } else
+ ddc_line = RADEON_GPIO_MONID;
+ break;
+ case DDC_CRT2:
+ if (rdev->family == CHIP_R200 ||
+ rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350) {
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ ddc = DDC_DVI;
+ } else if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIO_MONID;
+ else if (rdev->family >= CHIP_RV350) {
+ ddc_line = RADEON_GPIO_MONID;
+ ddc = DDC_MONID;
+ } else
+ ddc_line = RADEON_GPIO_CRT2_DDC;
+ break;
+ }
+
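+ /* gpiopad and mdgpio buses use separate mask/a/en/y registers; plain DDC lines pack all functions into a single register */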
+ if (ddc_line == RADEON_GPIOPAD_MASK) {
+ i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c.a_data_reg = RADEON_GPIOPAD_A;
+ i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c.y_data_reg = RADEON_GPIOPAD_Y;
+ } else if (ddc_line == RADEON_MDGPIO_MASK) {
+ i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+ i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+ i2c.a_clk_reg = RADEON_MDGPIO_A;
+ i2c.a_data_reg = RADEON_MDGPIO_A;
+ i2c.en_clk_reg = RADEON_MDGPIO_EN;
+ i2c.en_data_reg = RADEON_MDGPIO_EN;
+ i2c.y_clk_reg = RADEON_MDGPIO_Y;
+ i2c.y_data_reg = RADEON_MDGPIO_Y;
+ } else {
+ i2c.mask_clk_reg = ddc_line;
+ i2c.mask_data_reg = ddc_line;
+ i2c.a_clk_reg = ddc_line;
+ i2c.a_data_reg = ddc_line;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line;
+ i2c.y_data_reg = ddc_line;
+ }
+
+ if (clk_mask && data_mask) {
+ /* system specific masks */
+ i2c.mask_clk_mask = clk_mask;
+ i2c.mask_data_mask = data_mask;
+ i2c.a_clk_mask = clk_mask;
+ i2c.a_data_mask = data_mask;
+ i2c.en_clk_mask = clk_mask;
+ i2c.en_data_mask = data_mask;
+ i2c.y_clk_mask = clk_mask;
+ i2c.y_data_mask = data_mask;
+ } else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+ (ddc_line == RADEON_MDGPIO_MASK)) {
+ /* default gpiopad masks */
+ i2c.mask_clk_mask = (0x20 << 8);
+ i2c.mask_data_mask = 0x80;
+ i2c.a_clk_mask = (0x20 << 8);
+ i2c.a_data_mask = 0x80;
+ i2c.en_clk_mask = (0x20 << 8);
+ i2c.en_data_mask = 0x80;
+ i2c.y_clk_mask = (0x20 << 8);
+ i2c.y_data_mask = 0x80;
+ } else {
+ /* default masks for ddc pads */
+ i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+ i2c.mask_data_mask = RADEON_GPIO_MASK_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+ }
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R200:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_MONID:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_CRT2_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ case RADEON_GPIO_MONID:
+ /* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+ * reliably on some pre-r4xx hardware; not sure why.
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = ddc;
+ i2c.hpd = RADEON_HPD_NONE;
+
+ if (ddc_line)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+ u16 offset;
+ u8 id, blocks, clk, data;
+ int i;
+
+ i2c.valid = false;
+
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ /* gpiopad */
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+ (1 << clk), (1 << data));
+ break;
+ }
+ }
+ }
+ return i2c;
+}
+
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+
+ /* actual hw pads
+ * r1xx/rs2xx/rs3xx
+ * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
+ * r200
+ * 0x60, 0x64, 0x68, mm
+ * r300/r350
+ * 0x60, 0x64, mm
+ * rv2xx/rv3xx/rs4xx
+ * 0x60, 0x64, 0x68, gpiopads, mm
+ */
+
+ /* 0x60 */
+ i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+ /* 0x64 */
+ i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+ /* mm i2c */
+ i2c.valid = true;
+ i2c.hw_capable = true;
+ i2c.mm_i2c = true;
+ i2c.i2c_id = 0xa0;
+ rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350) {
+ /* only 2 sw i2c pads */
+ } else if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) {
+ /* 0x68 */
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+ /* gpiopad */
+ i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ if (i2c.valid)
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+ } else if ((rdev->family == CHIP_R200) ||
+ (rdev->family >= CHIP_R300)) {
+ /* 0x68 */
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+ } else {
+ /* 0x68 */
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+ /* 0x6c */
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+ }
+}
+
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t pll_info;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+ int8_t rev;
+ uint16_t sclk, mclk;
+
+ pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+ if (pll_info) {
+ rev = RBIOS8(pll_info);
+
+ /* pixel clocks */
+ p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+ p1pll->reference_div = RBIOS16(pll_info + 0x10);
+ p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+ p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+
+ if (rev > 9) {
+ p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+ p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ }
+ *p2pll = *p1pll;
+
+ /* system clock */
+ spll->reference_freq = RBIOS16(pll_info + 0x1a);
+ spll->reference_div = RBIOS16(pll_info + 0x1c);
+ spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+ spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+ if (rev > 10) {
+ spll->pll_in_min = RBIOS32(pll_info + 0x48);
+ spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+ } else {
+ /* ??? */
+ spll->pll_in_min = 40;
+ spll->pll_in_max = 500;
+ }
+
+ /* memory clock */
+ mpll->reference_freq = RBIOS16(pll_info + 0x26);
+ mpll->reference_div = RBIOS16(pll_info + 0x28);
+ mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+ mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+ if (rev > 10) {
+ mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+ mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+ } else {
+ /* ??? */
+ mpll->pll_in_min = 40;
+ mpll->pll_in_max = 500;
+ }
+
+ /* default sclk/mclk */
+ sclk = RBIOS16(pll_info + 0xa);
+ mclk = RBIOS16(pll_info + 0x8);
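+ /* fall back to 200 MHz; clock values are stored in 10 kHz units */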
+ if (sclk == 0)
+ sclk = 200 * 100;
+ if (mclk == 0)
+ mclk = 200 * 100;
+
+ rdev->clock.default_sclk = sclk;
+ rdev->clock.default_mclk = mclk;
+
+ if (RBIOS32(pll_info + 0x16))
+ rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+ else
+ rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
+
+ return true;
+ }
+ return false;
+}
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 igp_info;
+
+ /* sideport memory is AMD-only; RS400 is the Intel-chipset variant */
+ if (rdev->family == CHIP_RS400)
+ return false;
+
+ igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+ if (igp_info) {
+ if (RBIOS16(igp_info + 0x4))
+ return true;
+ }
+ return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+ 0x00000808, /* r100 */
+ 0x00000808, /* rv100 */
+ 0x00000808, /* rs100 */
+ 0x00000808, /* rv200 */
+ 0x00000808, /* rs200 */
+ 0x00000808, /* r200 */
+ 0x00000808, /* rv250 */
+ 0x00000000, /* rs300 */
+ 0x00000808, /* rv280 */
+ 0x00000808, /* r300 */
+ 0x00000808, /* r350 */
+ 0x00000808, /* rv350 */
+ 0x00000808, /* rv380 */
+ 0x00000808, /* r420 */
+ 0x00000808, /* r423 */
+ 0x00000808, /* rv410 */
+ 0x00000000, /* rs400 */
+ 0x00000000, /* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_primary_dac *p_dac)
+{
+ p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+ return;
+}
+
+struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t dac_info;
+ uint8_t rev, bg, dac;
+ struct radeon_encoder_primary_dac *p_dac = NULL;
+ int found = 0;
+
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
+ return NULL;
+
+ /* check CRT table */
+ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ if (dac_info) {
+ rev = RBIOS8(dac_info) & 0x3;
+ if (rev < 2) {
+ bg = RBIOS8(dac_info + 0x2) & 0xf;
+ dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ } else {
+ bg = RBIOS8(dac_info + 0x2) & 0xf;
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+ /* if the values are zeros, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
+ found = 1;
+ }
+
+ /* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
+ /* Radeon 9100 (R200) */
+ ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7149))) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+ return p_dac;
+}
+
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ uint16_t tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (tv_info) {
+ if (RBIOS8(tv_info + 6) == 'T') {
+ switch (RBIOS8(tv_info + 7) & 0xf) {
+ case 1:
+ tv_std = TV_STD_NTSC;
+ DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+ break;
+ case 2:
+ tv_std = TV_STD_PAL;
+ DRM_DEBUG_KMS("Default TV standard: PAL\n");
+ break;
+ case 3:
+ tv_std = TV_STD_PAL_M;
+ DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+ break;
+ case 4:
+ tv_std = TV_STD_PAL_60;
+ DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+ break;
+ case 5:
+ tv_std = TV_STD_NTSC_J;
+ DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+ break;
+ case 6:
+ tv_std = TV_STD_SCART_PAL;
+ DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_DEBUG_KMS
+ ("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+
+ switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+ case 0:
+ DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
+ break;
+ case 1:
+ DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
+ break;
+ case 2:
+ DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
+ break;
+ case 3:
+ DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return tv_std;
+}
+
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+ 0x00000000, /* r100 */
+ 0x00280000, /* rv100 */
+ 0x00000000, /* rs100 */
+ 0x00880000, /* rv200 */
+ 0x00000000, /* rs200 */
+ 0x00000000, /* r200 */
+ 0x00770000, /* rv250 */
+ 0x00290000, /* rs300 */
+ 0x00560000, /* rv280 */
+ 0x00780000, /* r300 */
+ 0x00770000, /* r350 */
+ 0x00780000, /* rv350 */
+ 0x00780000, /* rv380 */
+ 0x01080000, /* r420 */
+ 0x01080000, /* r423 */
+ 0x01080000, /* rv410 */
+ 0x00780000, /* rs400 */
+ 0x00780000, /* rs480 */
+};
+
+static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_tv_dac *tv_dac)
+{
+ tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+ if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+ tv_dac->ps2_tvdac_adj = 0x00880000;
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ return;
+}
+
+struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t dac_info;
+ uint8_t rev, bg, dac;
+ struct radeon_encoder_tv_dac *tv_dac = NULL;
+ int found = 0;
+
+ tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+ if (!tv_dac)
+ return NULL;
+
+ /* first check TV table */
+ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (dac_info) {
+ rev = RBIOS8(dac_info + 0x3);
+ if (rev > 4) {
+ bg = RBIOS8(dac_info + 0xc) & 0xf;
+ dac = RBIOS8(dac_info + 0xd) & 0xf;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xe) & 0xf;
+ dac = RBIOS8(dac_info + 0xf) & 0xf;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0x10) & 0xf;
+ dac = RBIOS8(dac_info + 0x11) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
+ } else if (rev > 1) {
+ bg = RBIOS8(dac_info + 0xc) & 0xf;
+ dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+ tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xd) & 0xf;
+ dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+ tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+ bg = RBIOS8(dac_info + 0xe) & 0xf;
+ dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+ tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
+ }
+ tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+ }
+ if (!found) {
+ /* then check CRT table */
+ dac_info =
+ combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ if (dac_info) {
+ rev = RBIOS8(dac_info) & 0x3;
+ if (rev < 2) {
+ bg = RBIOS8(dac_info + 0x3) & 0xf;
+ dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+ tv_dac->ps2_tvdac_adj =
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
+ } else {
+ bg = RBIOS8(dac_info + 0x4) & 0xf;
+ dac = RBIOS8(dac_info + 0x5) & 0xf;
+ tv_dac->ps2_tvdac_adj =
+ (bg << 16) | (dac << 20);
+ tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ /* if the values are all zeros, use the table */
+ if (tv_dac->ps2_tvdac_adj)
+ found = 1;
+ }
+ } else {
+ DRM_INFO("No TV DAC info found in BIOS\n");
+ }
+ }
+
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
+
+ return tv_dac;
+}
+
+static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
+ radeon_device
+ *rdev)
+{
+ struct radeon_encoder_lvds *lvds = NULL;
+ uint32_t fp_vert_stretch, fp_horz_stretch;
+ uint32_t ppll_div_sel, ppll_val;
+ uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+
+ lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+ fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+ /* These should be fail-safe defaults, fingers crossed */
+ lvds->panel_pwr_delay = 200;
+ lvds->panel_vcc_delay = 2000;
+
+ lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
+ lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
+
+ if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+ lvds->native_mode.vdisplay =
+ ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+ RADEON_VERT_PANEL_SHIFT) + 1;
+ else
+ lvds->native_mode.vdisplay =
+ (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+ if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+ lvds->native_mode.hdisplay =
+ (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+ RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+ else
+ lvds->native_mode.hdisplay =
+ ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+ if ((lvds->native_mode.hdisplay < 640) ||
+ (lvds->native_mode.vdisplay < 480)) {
+ lvds->native_mode.hdisplay = 640;
+ lvds->native_mode.vdisplay = 480;
+ }
+
+ ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+ ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
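+ /* 0x1bb looks like the power-on default; if the PPLL still holds it, assume the BIOS never programmed real dividers */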
+ if ((ppll_val & 0x000707ff) == 0x1bb)
+ lvds->use_bios_dividers = false;
+ else {
+ lvds->panel_ref_divider =
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+ lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+ if ((lvds->panel_ref_divider != 0) &&
+ (lvds->panel_fb_divider > 3))
+ lvds->use_bios_dividers = true;
+ }
+ lvds->panel_vcc_delay = 200;
+
+ DRM_INFO("Panel info derived from registers\n");
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
+
+ return lvds;
+}
+
+struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t lcd_info;
+ uint32_t panel_setup;
+ char stmp[30];
+ int tmp, i;
+ struct radeon_encoder_lvds *lvds = NULL;
+
+ lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+ if (lcd_info) {
+ lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+ if (!lvds)
+ return NULL;
+
+ for (i = 0; i < 24; i++)
+ stmp[i] = RBIOS8(lcd_info + i + 1);
+ stmp[24] = 0;
+
+ DRM_INFO("Panel ID String: %s\n", stmp);
+
+ lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+ lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
+
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
+
+ lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+ lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
+
+ lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+ lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+ lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+ lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+ lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+ lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+ if ((lvds->panel_ref_divider != 0) &&
+ (lvds->panel_fb_divider > 3))
+ lvds->use_bios_dividers = true;
+
+ panel_setup = RBIOS32(lcd_info + 0x39);
+ lvds->lvds_gen_cntl = 0xff00;
+ if (panel_setup & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+ if ((panel_setup >> 4) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+ switch ((panel_setup >> 8) & 0x7) {
+ case 0:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+ break;
+ case 1:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+ break;
+ case 2:
+ lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+ break;
+ default:
+ break;
+ }
+
+ if ((panel_setup >> 16) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+ if ((panel_setup >> 17) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+ if ((panel_setup >> 18) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+ if ((panel_setup >> 23) & 0x1)
+ lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+ lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
+ for (i = 0; i < 32; i++) {
+ tmp = RBIOS16(lcd_info + 64 + i * 2);
+ if (tmp == 0)
+ break;
+
+ if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ (RBIOS8(tmp + 23) * 8);
+
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
+
+ lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
+ lvds->native_mode.flags = 0;
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+ }
+ }
+ } else {
+ DRM_INFO("No panel info found in BIOS\n");
+ lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
+ }
+
+ if (lvds)
+ encoder->native_mode = lvds->native_mode;
+ return lvds;
+}
+
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R100 */
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV100 */
+ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS100 */
+ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV200 */
+ {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RS200 */
+ {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R200 */
+ {{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}}, /* CHIP_RV250 */
+ {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS300 */
+ {{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}}, /* CHIP_RV280 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R300 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R350 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV350 */
+ {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV380 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
+ {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
+};
+
+bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ tmds->tmds_pll[i].value =
+ default_tmds_pll[rdev->family][i].value;
+ tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+ }
+
+ return true;
+}
+
+bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t tmds_info;
+ int i, n;
+ uint8_t ver;
+
+ tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+ if (tmds_info) {
+ ver = RBIOS8(tmds_info);
+ DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
+ if (ver == 3) {
+ n = RBIOS8(tmds_info + 5) + 1;
+ if (n > 4)
+ n = 4;
+ for (i = 0; i < n; i++) {
+ tmds->tmds_pll[i].value =
+ RBIOS32(tmds_info + i * 10 + 0x08);
+ tmds->tmds_pll[i].freq =
+ RBIOS16(tmds_info + i * 10 + 0x10);
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+ }
+ } else if (ver == 4) {
+ int stride = 0;
+ n = RBIOS8(tmds_info + 5) + 1;
+ if (n > 4)
+ n = 4;
+ for (i = 0; i < n; i++) {
+ tmds->tmds_pll[i].value =
+ RBIOS32(tmds_info + stride + 0x08);
+ tmds->tmds_pll[i].freq =
+ RBIOS16(tmds_info + stride + 0x10);
+ if (i == 0)
+ stride += 10;
+ else
+ stride += 6;
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+ tmds->tmds_pll[i].freq,
+ tmds->tmds_pll[i].value);
+ }
+ }
+ } else {
+ DRM_INFO("No TMDS info found in BIOS\n");
+ return false;
+ }
+ return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ /* default for macs */
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+
+ /* XXX some macs have duallink chips */
+ switch (rdev->mode_info.connector_table) {
+ case CT_POWERBOOK_EXTERNAL:
+ case CT_MINI_EXTERNAL:
+ default:
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
+
+ return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t offset;
+ uint8_t ver;
+ enum radeon_combios_ddc gpio;
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ tmds->i2c_bus = NULL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
+ tmds->slave_addr = RBIOS8(offset + 4 + 2);
+ tmds->slave_addr >>= 1; /* 7 bit addressing */
+ gpio = RBIOS8(offset + 4 + 3);
+ if (gpio == DDC_LCD) {
+ /* MM i2c */
+ i2c_bus.valid = true;
+ i2c_bus.hw_capable = true;
+ i2c_bus.mm_i2c = true;
+ i2c_bus.i2c_id = 0xa0;
+ } else
+ i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ }
+ }
+
+ if (!tmds->i2c_bus) {
+ DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
+
+ rdev->mode_info.connector_table = radeon_connector_table;
+ if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+ if (of_machine_is_compatible("PowerBook3,3")) {
+ /* powerbook with VGA */
+ rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+ } else if (of_machine_is_compatible("PowerBook3,4") ||
+ of_machine_is_compatible("PowerBook3,5")) {
+ /* powerbook with internal tmds */
+ rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+ } else if (of_machine_is_compatible("PowerBook5,1") ||
+ of_machine_is_compatible("PowerBook5,2") ||
+ of_machine_is_compatible("PowerBook5,3") ||
+ of_machine_is_compatible("PowerBook5,4") ||
+ of_machine_is_compatible("PowerBook5,5")) {
+ /* powerbook with external single link tmds (sil164) */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (of_machine_is_compatible("PowerBook5,6")) {
+ /* powerbook with external dual or single link tmds */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (of_machine_is_compatible("PowerBook5,7") ||
+ of_machine_is_compatible("PowerBook5,8") ||
+ of_machine_is_compatible("PowerBook5,9")) {
+ /* PowerBook6,2 ? */
+ /* powerbook with external dual link tmds (sil1178?) */
+ rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+ } else if (of_machine_is_compatible("PowerBook4,1") ||
+ of_machine_is_compatible("PowerBook4,2") ||
+ of_machine_is_compatible("PowerBook4,3") ||
+ of_machine_is_compatible("PowerBook6,3") ||
+ of_machine_is_compatible("PowerBook6,5") ||
+ of_machine_is_compatible("PowerBook6,7")) {
+ /* ibook */
+ rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (of_machine_is_compatible("PowerMac3,5")) {
+ /* PowerMac G4 Silver radeon 7500 */
+ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+ } else if (of_machine_is_compatible("PowerMac4,4")) {
+ /* emac */
+ rdev->mode_info.connector_table = CT_EMAC;
+ } else if (of_machine_is_compatible("PowerMac10,1")) {
+ /* mini with internal tmds */
+ rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+ } else if (of_machine_is_compatible("PowerMac10,2")) {
+ /* mini with external tmds */
+ rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+ } else if (of_machine_is_compatible("PowerMac12,1")) {
+ /* PowerMac8,1 ? */
+ /* imac g5 isight */
+ rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+ } else if ((rdev->pdev->device == 0x4a48) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4a48)) {
+ /* Mac X800 */
+ rdev->mode_info.connector_table = CT_MAC_X800;
+ } else if ((of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3")) &&
+ (rdev->pdev->device == 0x4150) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4150)) {
+ /* Mac G5 tower 9600 */
+ rdev->mode_info.connector_table = CT_MAC_G5_9600;
+ } else if ((rdev->pdev->device == 0x4c66) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4c66)) {
+ /* SAM440ep RV250 embedded board */
+ rdev->mode_info.connector_table = CT_SAM440EP;
+ } else
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+ if (ASIC_IS_RN50(rdev))
+ rdev->mode_info.connector_table = CT_RN50_POWER;
+ else
+#endif
+ rdev->mode_info.connector_table = CT_GENERIC;
+ }
+
+ switch (rdev->mode_info.connector_table) {
+ case CT_GENERIC:
+ DRM_INFO("Connector Table: %d (generic)\n",
+ rdev->mode_info.connector_table);
+ /* these are the most common settings */
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ } else if (rdev->flags & RADEON_IS_MOBILITY) {
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ } else {
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ }
+
+ if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+ /* TV - tv dac */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ }
+ break;
+ case CT_IBOOK:
+ DRM_INFO("Connector Table: %d (ibook)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* VGA - TV DAC */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_POWERBOOK_EXTERNAL:
+ DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - primary dac, ext tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ /* XXX some are SL */
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_POWERBOOK_INTERNAL:
+ DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - primary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_POWERBOOK_VGA:
+ DRM_INFO("Connector Table: %d (powerbook vga)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_MINI_EXTERNAL:
+ DRM_INFO("Connector Table: %d (mini external tmds)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, ext tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ /* XXX are any DL? */
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_MINI_INTERNAL:
+ DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_IMAC_G5_ISIGHT:
+ DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-D - int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
+ /* VGA - tv dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_EMAC:
+ DRM_INFO("Connector Table: %d (emac)\n",
+ rdev->mode_info.connector_table);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* VGA - tv dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_RN50_POWER:
+ DRM_INFO("Connector Table: %d (rn50-power)\n",
+ rdev->mode_info.connector_table);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ break;
+ case CT_MAC_X800:
+ DRM_INFO("Connector Table: %d (mac x800)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
+ break;
+ case CT_MAC_G5_9600:
+ DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* ADC - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_SAM440EP:
+ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - secondary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ case CT_MAC_G4_SILVER:
+ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ default:
+ DRM_INFO("Connector table: %d (invalid)\n",
+ rdev->mode_info.connector_table);
+ return false;
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
+
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+ int bios_index,
+ enum radeon_combios_connector
+ *legacy_connector,
+ struct radeon_i2c_bus_rec *ddc_i2c,
+ struct radeon_hpd *hpd)
+{
+ /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
+ * one with VGA DDC and one with CRT2 DDC - kill the CRT2 DDC one.
+ */
+ if (dev->pdev->device == 0x515e &&
+ dev->pdev->subsystem_vendor == 0x1014) {
+ if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ return false;
+ }
+
+ /* X300 card with extra non-existent DVI port */
+ if (dev->pdev->device == 0x5B60 &&
+ dev->pdev->subsystem_vendor == 0x17af &&
+ dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+ if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+ return false;
+ }
+
+ return true;
+}
+
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+ /* Acer 5102 has non-existent TV port */
+ if (dev->pdev->device == 0x5975 &&
+ dev->pdev->subsystem_vendor == 0x1025 &&
+ dev->pdev->subsystem_device == 0x009f)
+ return false;
+
+ /* HP dc5750 has non-existent TV port */
+ if (dev->pdev->device == 0x5974 &&
+ dev->pdev->subsystem_vendor == 0x103c &&
+ dev->pdev->subsystem_device == 0x280a)
+ return false;
+
+ /* MSI S270 has non-existent TV port */
+ if (dev->pdev->device == 0x5955 &&
+ dev->pdev->subsystem_vendor == 0x1462 &&
+ dev->pdev->subsystem_device == 0x0131)
+ return false;
+
+ return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t ext_tmds_info;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
+ ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (ext_tmds_info) {
+ uint8_t rev = RBIOS8(ext_tmds_info);
+ uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
+ if (rev >= 3) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ } else {
+ if (flags & 1) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ }
+ }
+ }
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
+
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t conn_info, entry, devices;
+ uint16_t tmp, connector_object_id;
+ enum radeon_combios_ddc ddc_type;
+ enum radeon_combios_connector connector;
+ int i = 0;
+ struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
+
+ conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+ if (conn_info) {
+ for (i = 0; i < 4; i++) {
+ entry = conn_info + 2 + i * 2;
+
+ if (!RBIOS16(entry))
+ break;
+
+ tmp = RBIOS16(entry);
+
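+ /* Connector info word layout, as decoded below: bits 15-12 =
+ * connector type, bits 11-8 = DDC type, bit 4 = DFP1/DFP2 (TMDS)
+ * select, bit 0 = CRT1/CRT2 (DAC) select.
+ */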
+ connector = (tmp >> 12) & 0xf;
+
+ ddc_type = (tmp >> 8) & 0xf;
+ if (ddc_type == 5)
+ ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ else
+ ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ case CONNECTOR_DVI_I_LEGACY:
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ hpd.hpd = RADEON_HPD_2;
+ else
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+
+ if (!radeon_apply_legacy_quirks(dev, i, &connector,
+ &ddc_i2c, &hpd))
+ continue;
+
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ devices = ATOM_DEVICE_DFP2_SUPPORT;
+ else
+ devices = ATOM_DEVICE_DFP1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev, devices, 0),
+ devices);
+ radeon_add_legacy_connector(dev, i, devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
+ break;
+ case CONNECTOR_CRT_LEGACY:
+ if (tmp & 0x1) {
+ devices = ATOM_DEVICE_CRT2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ } else {
+ devices = ATOM_DEVICE_CRT1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
+ radeon_add_legacy_connector(dev,
+ i,
+ devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ break;
+ case CONNECTOR_DVI_I_LEGACY:
+ devices = 0;
+ if (tmp & 0x1) {
+ devices |= ATOM_DEVICE_CRT2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ } else {
+ devices |= ATOM_DEVICE_CRT1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
+ /* RV100 board with external TMDS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
+ if ((tmp >> 4) & 0x1) {
+ devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ connector_object_id = combios_check_dl_dvi(dev, 0);
+ } else {
+ devices |= ATOM_DEVICE_DFP1_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
+ radeon_add_legacy_connector(dev,
+ i,
+ devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c,
+ connector_object_id,
+ &hpd);
+ break;
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1) {
+ devices = ATOM_DEVICE_DFP2_SUPPORT;
+ connector_object_id = combios_check_dl_dvi(dev, 1);
+ } else {
+ devices = ATOM_DEVICE_DFP1_SUPPORT;
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev, devices, 0),
+ devices);
+ radeon_add_legacy_connector(dev, i, devices,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c,
+ connector_object_id,
+ &hpd);
+ break;
+ case CONNECTOR_CTV_LEGACY:
+ case CONNECTOR_STV_LEGACY:
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, i,
+ ATOM_DEVICE_TV1_SUPPORT,
+ legacy_connector_convert
+ [connector],
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
+ default:
+ DRM_ERROR("Unknown connector type: %d\n",
+ connector);
+ continue;
+ }
+
+ }
+ } else {
+ uint16_t tmds_info =
+ combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+ if (tmds_info) {
+ DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
+
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1;
+ radeon_add_legacy_connector(dev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT |
+ ATOM_DEVICE_DFP1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ } else {
+ uint16_t crt_info =
+ combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
+ if (crt_info) {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_connector(dev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ } else {
+ DRM_DEBUG_KMS("No connector info found\n");
+ return false;
+ }
+ }
+ }
+
+ if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+ uint16_t lcd_info =
+ combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+ if (lcd_info) {
+ uint16_t lcd_ddc_info =
+ combios_get_table_offset(dev,
+ COMBIOS_LCD_DDC_INFO_TABLE);
+
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+
+ if (lcd_ddc_info) {
+ ddc_type = RBIOS8(lcd_ddc_info + 2);
+ switch (ddc_type) {
+ case DDC_LCD:
+ ddc_i2c =
+ combios_setup_i2c_bus(rdev,
+ DDC_LCD,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+ break;
+ case DDC_GPIO:
+ ddc_i2c =
+ combios_setup_i2c_bus(rdev,
+ DDC_GPIO,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+ break;
+ default:
+ ddc_i2c =
+ combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+ break;
+ }
+ DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
+ } else
+ ddc_i2c.valid = false;
+
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_connector(dev,
+ 5,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ }
+ }
+
+ /* check TV table */
+ if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+ uint32_t tv_info =
+ combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ if (tv_info) {
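+ /* byte 6 of the TV info table appears to be an ASCII
+ * flag; 'T' indicating a TV DAC output is present.
+ */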
+ if (RBIOS8(tv_info + 6) == 'T') {
+ if (radeon_apply_legacy_tv_quirks(dev)) {
+ hpd.hpd = RADEON_HPD_NONE;
+ ddc_i2c.valid = false;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 6,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ }
+ }
+ }
+ }
+
+ radeon_link_encoder_connector(dev);
+
+ return true;
+}
+
+static const char *thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+};
+
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 offset, misc, misc2 = 0;
+ u8 rev, blocks, tmp;
+ int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ rdev->pm.default_power_state_index = -1;
+
+ /* allocate 2 power states */
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ /* allocate 1 clock mode per state */
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ rdev->pm.power_state[1].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[0].clock_info ||
+ !rdev->pm.power_state[1].clock_info)
+ goto pm_failed;
+ } else
+ goto pm_failed;
+
+ /* check for a thermal chip */
+ offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+ if (offset) {
+ u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+
+ rev = RBIOS8(offset);
+
+ if (rev == 0) {
+ thermal_controller = RBIOS8(offset + 3);
+ gpio = RBIOS8(offset + 4) & 0x3f;
+ i2c_addr = RBIOS8(offset + 5);
+ } else if (rev == 1) {
+ thermal_controller = RBIOS8(offset + 4);
+ gpio = RBIOS8(offset + 5) & 0x3f;
+ i2c_addr = RBIOS8(offset + 6);
+ } else if (rev == 2) {
+ thermal_controller = RBIOS8(offset + 4);
+ gpio = RBIOS8(offset + 5) & 0x3f;
+ i2c_addr = RBIOS8(offset + 6);
+ clk_bit = RBIOS8(offset + 0xa);
+ data_bit = RBIOS8(offset + 0xb);
+ }
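+ /* rev 2 tables additionally carry explicit clock/data GPIO
+ * bit indices, used below when the thermal chip sits on a
+ * bit-banged GPIO bus.
+ */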
+ if ((thermal_controller > 0) && (thermal_controller < 3)) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[thermal_controller],
+ i2c_addr >> 1);
+ if (gpio == DDC_LCD) {
+ /* MM i2c */
+ i2c_bus.valid = true;
+ i2c_bus.hw_capable = true;
+ i2c_bus.mm_i2c = true;
+ i2c_bus.i2c_id = 0xa0;
+ } else if (gpio == DDC_GPIO)
+ i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
+ else
+ i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[thermal_controller];
+ info.addr = i2c_addr >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ } else {
+ /* boards with a thermal chip, but no overdrive table */
+
+ /* Asus 9600xt has an f75375 on the monid bus */
+ if ((dev->pdev->device == 0x4152) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0xc002)) {
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = "f75375";
+ info.addr = 0x28;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ name, info.addr);
+ }
+ }
+ }
+
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ blocks = RBIOS8(offset + 0x2);
+ /* power mode 0 tends to be the only valid one */
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ goto default_mode;
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ misc = RBIOS16(offset + 0x5 + 0x0);
+ if (rev > 4)
+ misc2 = RBIOS16(offset + 0x5 + 0xe);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
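+ /* Legacy powerplay misc flags, as decoded below: bit 2 selects
+ * GPIO-controlled voltage and bit 3 its active-high polarity;
+ * misc2 bits 8-10 encode the voltage switch delay.
+ */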
+ if (misc & 0x4) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+ if (misc & 0x8)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+ if (rev < 6) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(offset + 0x5 + 0xb) * 4;
+ tmp = RBIOS8(offset + 0x5 + 0xd);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else {
+ u8 entries = RBIOS8(offset + 0x5 + 0xb);
+ u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+ if (entries && voltage_table_offset) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(voltage_table_offset) * 4;
+ tmp = RBIOS8(voltage_table_offset + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+ }
+ switch ((misc2 & 0x700) >> 8) {
+ case 0:
+ default:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+ break;
+ case 1:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+ break;
+ case 4:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+ break;
+ }
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rev > 6)
+ rdev->pm.power_state[state_index].pcie_lanes =
+ RBIOS8(offset + 0x5 + 0x10);
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ state_index++;
+ } else {
+ /* XXX figure out some good default low power mode for mobility cards w/out power tables */
+ }
+ } else {
+ /* XXX figure out some good default low power mode for desktop cards */
+ }
+
+default_mode:
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+ if ((state_index > 0) &&
+ (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+ rdev->pm.power_state[state_index].clock_info[0].voltage =
+ rdev->pm.power_state[0].clock_info[0].voltage;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.power_state[state_index].flags = 0;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = state_index + 1;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ return;
+
+pm_failed:
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = 0;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+}
+
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return;
+
+ switch (tmds->dvo_chip) {
+ case DVO_SIL164:
+ /* sil 164 */
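+ /* Minimal bring-up sequence for the SiI164 transmitter; the
+ * final write to register 0x08 is assumed to take the part
+ * out of power-down and enable the link.
+ */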
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x09, 0x00);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x3b);
+ break;
+ case DVO_SIL1178:
+ /* sil 1178 - untested */
+ /*
+ * 0x0f, 0x44
+ * 0x0f, 0x4c
+ * 0x0e, 0x01
+ * 0x0a, 0x80
+ * 0x09, 0x30
+ * 0x0c, 0xc9
+ * 0x0d, 0x70
+ * 0x08, 0x32
+ * 0x08, 0x33
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint16_t offset;
+ uint8_t blocks, slave_addr, rev;
+ uint32_t index, id;
+ uint32_t reg, val, and_mask, or_mask;
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return false;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev > 1) {
+ blocks = RBIOS8(offset + 3);
+ index = offset + 4;
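+ /* each block starts with a 16-bit id: bits 15-13 select the
+ * op (reg write, rmw, udelay, mdelay, i2c write) and the low
+ * bits carry the register index or i2c slave address.
+ */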
+ while (blocks > 0) {
+ id = RBIOS16(index);
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 3:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ mdelay(val);
+ break;
+ case 6:
+ slave_addr = id & 0xff;
+ slave_addr >>= 1; /* 7 bit addressing */
+ index++;
+ reg = RBIOS8(index);
+ index++;
+ val = RBIOS8(index);
+ index++;
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ blocks--;
+ }
+ return true;
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ index = offset + 10;
+ id = RBIOS16(index);
+ while (id != 0xffff) {
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 5:
+ reg = id & 0x1fff;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32_PLL(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32_PLL(reg, val);
+ break;
+ case 6:
+ reg = id & 0x1fff;
+ val = RBIOS8(index);
+ index += 1;
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ id = RBIOS16(index);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
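+ /* Each entry starts with a 16-bit header: bits 15-13 select the op
+ * (write, read-modify-write, delay, poll) and bits 12-0 give the
+ * register address; a zero header terminates the table.
+ */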
+ if (offset) {
+ while (RBIOS16(offset)) {
+ uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+ uint32_t addr = (RBIOS16(offset) & 0x1fff);
+ uint32_t val, and_mask, or_mask;
+ uint32_t tmp;
+
+ offset += 2;
+ switch (cmd) {
+ case 0:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32(addr, val);
+ break;
+ case 1:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32(addr, val);
+ break;
+ case 2:
+ and_mask = RBIOS32(offset);
+ offset += 4;
+ or_mask = RBIOS32(offset);
+ offset += 4;
+ tmp = RREG32(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32(addr, tmp);
+ break;
+ case 3:
+ and_mask = RBIOS32(offset);
+ offset += 4;
+ or_mask = RBIOS32(offset);
+ offset += 4;
+ tmp = RREG32(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32(addr, tmp);
+ break;
+ case 4:
+ val = RBIOS16(offset);
+ offset += 2;
+ udelay(val);
+ break;
+ case 5:
+ val = RBIOS16(offset);
+ offset += 2;
+ switch (addr) {
+ case 8:
+ while (val--) {
+ if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_MC_BUSY))
+ break;
+ }
+ break;
+ case 9:
+ while (val--) {
+ if ((RREG32(RADEON_MC_STATUS) &
+ RADEON_MC_IDLE))
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
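+ /* PLL table entries use an 8-bit header: bits 7-6 select the op and
+ * bits 5-0 the PLL register index (or a sub-op for the delay/wait
+ * commands); a zero byte terminates the table.
+ */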
+ if (offset) {
+ while (RBIOS8(offset)) {
+ uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+ uint8_t addr = (RBIOS8(offset) & 0x3f);
+ uint32_t val, shift, tmp;
+ uint32_t and_mask, or_mask;
+
+ offset++;
+ switch (cmd) {
+ case 0:
+ val = RBIOS32(offset);
+ offset += 4;
+ WREG32_PLL(addr, val);
+ break;
+ case 1:
+ shift = RBIOS8(offset) * 8;
+ offset++;
+ and_mask = RBIOS8(offset) << shift;
+ and_mask |= ~(0xff << shift);
+ offset++;
+ or_mask = RBIOS8(offset) << shift;
+ offset++;
+ tmp = RREG32_PLL(addr);
+ tmp &= and_mask;
+ tmp |= or_mask;
+ WREG32_PLL(addr, tmp);
+ break;
+ case 2:
+ case 3:
+ tmp = 1000;
+ switch (addr) {
+ case 1:
+ udelay(150);
+ break;
+ case 2:
+ mdelay(1);
+ break;
+ case 3:
+ while (tmp--) {
+ if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_MC_BUSY))
+ break;
+ }
+ break;
+ case 4:
+ while (tmp--) {
+ if (RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+ RADEON_DLL_READY)
+ break;
+ }
+ break;
+ case 5:
+ tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+ if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+ uint32_t mclk_cntl =
+ RREG32_PLL(RADEON_MCLK_CNTL);
+ mclk_cntl &= 0xffff0000;
+ /* mclk_cntl |= 0x00001111; */ /* ??? */
+ WREG32_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ mdelay(10);
+#endif
+ WREG32_PLL(RADEON_CLK_PWRMGT_CNTL,
+ tmp & ~RADEON_CG_NO1_DEBUG_0);
+ mdelay(10);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+ uint16_t offset)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
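+ /* The table is a 0xff-terminated list of byte ops: 0x0f means poll
+ * for memory power-up complete, anything else carries an SDRAM mode
+ * register update.
+ */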
+ if (offset) {
+ uint8_t val = RBIOS8(offset);
+ while (val != 0xff) {
+ offset++;
+
+ if (val == 0x0f) {
+ uint32_t channel_complete_mask;
+
+ if (ASIC_IS_R300(rdev))
+ channel_complete_mask =
+ R300_MEM_PWRUP_COMPLETE;
+ else
+ channel_complete_mask =
+ RADEON_MEM_PWRUP_COMPLETE;
+ tmp = 20000;
+ while (tmp--) {
+ if ((RREG32(RADEON_MEM_STR_CNTL) &
+ channel_complete_mask) ==
+ channel_complete_mask)
+ break;
+ }
+ } else {
+ uint32_t or_mask = RBIOS16(offset);
+ offset += 2;
+
+ tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ tmp &= RADEON_SDRAM_MODE_MASK;
+ tmp |= or_mask;
+ WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+ or_mask = val << 24;
+ tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+ tmp &= RADEON_B3MEM_RESET_MASK;
+ tmp |= or_mask;
+ WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+ }
+ val = RBIOS8(offset);
+ }
+ }
+}
+
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+ int mem_addr_mapping)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t mem_cntl;
+ uint32_t mem_size;
+ uint32_t addr = 0;
+
+ mem_cntl = RREG32(RADEON_MEM_CNTL);
+ if (mem_cntl & RV100_HALF_MODE)
+ ram /= 2;
+ mem_size = ram;
+ mem_cntl &= ~(0xff << 8);
+ mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+ WREG32(RADEON_MEM_CNTL, mem_cntl);
+ RREG32(RADEON_MEM_CNTL);
+
+ /* sdram reset ? */
+
+ /* something like this???? */
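+ /* Probe by writing a marker to the first dword of each 1 MB page,
+ * top down, and reading it back; any mismatch suggests the tried
+ * size/mapping is wrong and 0 is returned.
+ */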
+ while (ram--) {
+ addr = ram * 1024 * 1024;
+ /* write to each page */
+ WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
+ /* read back and verify */
+ if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
+ return 0;
+ }
+
+ return mem_size;
+}
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint8_t rev;
+ uint16_t offset;
+ uint32_t mem_size = 0;
+ uint32_t mem_cntl = 0;
+
+ /* should do something smarter here I guess... */
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ /* first check detected mem table */
+ offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev < 3) {
+ mem_cntl = RBIOS32(offset + 1);
+ mem_size = RBIOS16(offset + 5);
+ if ((rdev->family < CHIP_R200) &&
+ !ASIC_IS_RN50(rdev))
+ WREG32(RADEON_MEM_CNTL, mem_cntl);
+ }
+ }
+
+ if (!mem_size) {
+ offset =
+ combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset - 1);
+ if (rev < 1) {
+ if ((rdev->family < CHIP_R200)
+ && !ASIC_IS_RN50(rdev)) {
+ int ram = 0;
+ int mem_addr_mapping = 0;
+
+ while (RBIOS8(offset)) {
+ ram = RBIOS8(offset);
+ mem_addr_mapping =
+ RBIOS8(offset + 1);
+ if (mem_addr_mapping != 0x25)
+ ram *= 2;
+ mem_size =
+ combios_detect_ram(dev, ram,
+ mem_addr_mapping);
+ if (mem_size)
+ break;
+ offset += 2;
+ }
+ } else
+ mem_size = RBIOS8(offset);
+ } else {
+ mem_size = RBIOS8(offset);
+ mem_size *= 2; /* convert to MB */
+ }
+ }
+ }
+
+ mem_size *= (1024 * 1024); /* convert to bytes */
+ WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint16_t table;
+
+ /* port hardcoded mac stuff from radeonfb */
+ if (rdev->bios == NULL)
+ return;
+
+ /* ASIC INIT 1 */
+ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* PLL INIT */
+ table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+ if (table)
+ combios_parse_pll_table(dev, table);
+
+ /* ASIC INIT 2 */
+ table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ /* ASIC INIT 4 */
+ table =
+ combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* RAM RESET */
+ table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+ if (table)
+ combios_parse_ram_reset_table(dev, table);
+
+ /* ASIC INIT 3 */
+ table =
+ combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+ if (table)
+ combios_parse_mmio_table(dev, table);
+
+ /* write CONFIG_MEMSIZE */
+ combios_write_ram_size(dev);
+ }
+
+ /* quirk for rs4xx HP nx6125 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x308b)
+ return;
+
+ /* quirk for rs4xx HP dv5000 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x30a4)
+ return;
+
+ /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x30ae)
+ return;
+
+ /* DYN CLK 1 */
+ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ if (table)
+ combios_parse_pll_table(dev, table);
+}
+
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+ bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+ bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+ /* let the bios control the backlight */
+ bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+ RADEON_ACC_MODE_CHANGE);
+
+ /* tell the bios a driver is loaded */
+ bios_7_scratch |= RADEON_DRV_LOADED;
+
+ WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+ WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t bios_6_scratch;
+
+ bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (lock)
+ bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+ else
+ bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+ uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+ if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("TV1 connected\n");
+ /* fix me */
+ bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+ /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+ bios_5_scratch |= RADEON_TV1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_TV1;
+ } else {
+ DRM_DEBUG_KMS("TV1 disconnected\n");
+ bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_TV1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("LCD1 connected\n");
+ bios_4_scratch |= RADEON_LCD1_ATTACHED;
+ bios_5_scratch |= RADEON_LCD1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+ } else {
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
+ bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+ bios_5_scratch &= ~RADEON_LCD1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("CRT1 connected\n");
+ bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+ bios_5_scratch |= RADEON_CRT1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+ } else {
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
+ bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_CRT1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("CRT2 connected\n");
+ bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+ bios_5_scratch |= RADEON_CRT2_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+ } else {
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
+ bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+ bios_5_scratch &= ~RADEON_CRT2_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP1 connected\n");
+ bios_4_scratch |= RADEON_DFP1_ATTACHED;
+ bios_5_scratch |= RADEON_DFP1_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+ } else {
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
+ bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+ bios_5_scratch &= ~RADEON_DFP1_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+ }
+ }
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP2 connected\n");
+ bios_4_scratch |= RADEON_DFP2_ATTACHED;
+ bios_5_scratch |= RADEON_DFP2_ON;
+ bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+ } else {
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
+ bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+ bios_5_scratch &= ~RADEON_DFP2_ON;
+ bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+ }
+ }
+ WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+ WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+ if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+ bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+ bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+ }
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+ bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+ bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+ }
+ WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_TV_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_CRT_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_LCD_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (on)
+ bios_6_scratch |= RADEON_DFP_DPMS_ON;
+ else
+ bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+ }
+ WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 0000000..5a87c9f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,2051 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* bail if the connector does not have hpd pin, e.g.,
+ * VGA, TV, etc.
+ */
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+ return;
+
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+ /* if the connector is already off, don't turn it back on */
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ return;
+
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ /* if existing sink type was not DP no need to retrain */
+ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return;
+
+ /* first get sink type as it may be reset after (un)plug */
+ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ int saved_dpms = connector->dpms;
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ } else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* set it to OFF so that drm_helper_connector_dpms()
+ * won't return immediately since the current state
+ * is ON at this point.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ connector->dpms = saved_dpms;
+ }
+ }
+}
+
+static void radeon_property_change_mode(struct drm_encoder *encoder)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+
+ if (crtc && crtc->enabled) {
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ int bpc = 8;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (radeon_connector->use_digital) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+ drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+ bpc = 6;
+ else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+ bpc = 8;
+ }
+ break;
+ }
+ return bpc;
+}
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *best_encoder = NULL;
+ struct drm_encoder *encoder = NULL;
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ struct drm_mode_object *obj;
+ bool connected;
+ int i;
+
+ best_encoder = connector_funcs->best_encoder(connector);
+
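+	/* only the active (best) encoder is flagged as connected; any
+	 * other encoder wired to this connector is reported disconnected
+	 */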
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ if ((encoder == best_encoder) && (status == connector_status_connected))
+ connected = true;
+ else
+ connected = false;
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+ else
+ radeon_combios_connected_scratch_regs(connector, encoder, connected);
+
+ }
+}
+
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+{
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ if (encoder->encoder_type == encoder_type)
+ return encoder;
+ }
+ return NULL;
+}
+
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+	/* pick the first encoder id */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+/*
+ * radeon_connector_analog_encoder_conflict_solve
+ * - search for other connectors sharing this encoder.
+ *   If priority is true, set them disconnected when this one is connected;
+ *   if priority is false, set this one disconnected when they are connected.
+ */
+static enum drm_connector_status
+radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ enum drm_connector_status current_status,
+ bool priority)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *conflict;
+ struct radeon_connector *radeon_conflict;
+ int i;
+
+ list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+ if (conflict == connector)
+ continue;
+
+ radeon_conflict = to_radeon_connector(conflict);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (conflict->encoder_ids[i] == 0)
+ break;
+
+ /* if the IDs match */
+ if (conflict->encoder_ids[i] == encoder->base.id) {
+ if (conflict->status != connector_status_connected)
+ continue;
+
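+			/* a digital sink doesn't drive the shared DAC,
+			 * so it can't conflict
+			 */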
+ if (radeon_conflict->use_digital)
+ continue;
+
+ if (priority == true) {
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
+ conflict->status = connector_status_disconnected;
+ radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
+ } else {
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
+ current_status = connector_status_disconnected;
+ }
+ break;
+ }
+ }
+ }
+ return current_status;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0 &&
+ native_mode->clock != 0) {
+ mode = drm_mode_duplicate(dev, native_mode);
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
+ } else if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0) {
+ /* mac laptops without an edid */
+ /* Note that this is not necessarily the exact panel mode,
+ * but an approximation based on the cvt formula. For these
+ * systems we should ideally read the mode info out of the
+ * registers or add a mode table, but this works and is much
+ * simpler.
+ */
+ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
+ }
+ return mode;
+}
+
+static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int i;
+ struct mode_size {
+ int w;
+ int h;
+ } common_modes[17] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200}
+ };
+
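+	/* filter the list: TV tops out at 1024x768, panels only get modes
+	 * no larger than (and not equal to) the native mode, and anything
+	 * below 320x200 is dropped
+	 */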
+ for (i = 0; i < 17; i++) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ if (common_modes[i].w > 1024 ||
+ common_modes[i].h > 768)
+ continue;
+ }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+ }
+ if (common_modes[i].w < 320 || common_modes[i].h < 200)
+ continue;
+
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ drm_mode_probed_add(connector, mode);
+ }
+}
+
+static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ if (property == rdev->mode_info.coherent_mode_property) {
+ struct radeon_encoder_atom_dig *dig;
+ bool new_coherent_mode;
+
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (!radeon_encoder->enc_priv)
+ return 0;
+
+ dig = radeon_encoder->enc_priv;
+ new_coherent_mode = val ? true : false;
+ if (dig->coherent_mode != new_coherent_mode) {
+ dig->coherent_mode = new_coherent_mode;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.underscan_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_type != val) {
+ radeon_encoder->underscan_type = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.underscan_hborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_hborder != val) {
+ radeon_encoder->underscan_hborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.underscan_vborder_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_vborder != val) {
+ radeon_encoder->underscan_vborder = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
+ if (property == rdev->mode_info.tv_std_property) {
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
+ if (!encoder) {
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC);
+ }
+
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (!radeon_encoder->enc_priv)
+ return 0;
+ if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+ struct radeon_encoder_atom_dac *dac_int;
+ dac_int = radeon_encoder->enc_priv;
+ dac_int->tv_std = val;
+ } else {
+ struct radeon_encoder_tv_dac *dac_int;
+ dac_int = radeon_encoder->enc_priv;
+ dac_int->tv_std = val;
+ }
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
+ if (property == rdev->mode_info.load_detect_property) {
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+
+ if (val == 0)
+ radeon_connector->dac_load_detect = false;
+ else
+ radeon_connector->dac_load_detect = true;
+ }
+
+ if (property == rdev->mode_info.tmds_pll_property) {
+ struct radeon_encoder_int_tmds *tmds = NULL;
+ bool ret = false;
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ tmds = radeon_encoder->enc_priv;
+ if (!tmds)
+ return 0;
+
+ if (val == 0) {
+ if (rdev->is_atom_bios)
+ ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds);
+ else
+ ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
+ }
+ if (val == 1 || ret == false) {
+ radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
+ }
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
+ return 0;
+}
+
+static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *t, *mode;
+
+ /* If the EDID preferred mode doesn't match the native mode, use it */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ if (mode->hdisplay != native_mode->hdisplay ||
+ mode->vdisplay != native_mode->vdisplay)
+ memcpy(native_mode, mode, sizeof(*mode));
+ }
+ }
+
+ /* Try to get native mode details from EDID if necessary */
+ if (!native_mode->clock) {
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay) {
+ *native_mode = *mode;
+ drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
+ DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
+ break;
+ }
+ }
+ }
+
+ if (!native_mode->clock) {
+ DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
+ radeon_encoder->rmx_type = RMX_OFF;
+ }
+}
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder;
+ int ret = 0;
+ struct drm_display_mode *mode;
+
+ if (radeon_connector->ddc_bus) {
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ }
+ return ret;
+ }
+ }
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ return 0;
+
+ /* we have no EDID modes */
+ mode = radeon_fp_native_mode(encoder);
+ if (mode) {
+ ret = 1;
+ drm_mode_probed_add(connector, mode);
+ /* add the width/height from vbios tables if available */
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ }
+
+ return ret;
+}
+
+static int radeon_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+ return MODE_PANEL;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* AVIVO hardware supports downscaling modes larger than the panel
+ * to the panel size, but I'm not sure this is desirable.
+ */
+ if ((mode->hdisplay > native_mode->hdisplay) ||
+ (mode->vdisplay > native_mode->vdisplay))
+ return MODE_PANEL;
+
+ /* if scaling is disabled, block non-native modes */
+ if (radeon_encoder->rmx_type == RMX_OFF) {
+ if ((mode->hdisplay != native_mode->hdisplay) ||
+ (mode->vdisplay != native_mode->vdisplay))
+ return MODE_PANEL;
+ }
+ }
+
+ return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* check if panel is valid */
+ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+ ret = connector_status_connected;
+ }
+
+ /* check for edid as well */
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ else {
+ if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ }
+ }
+ /* check acpi lid status ??? */
+
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int radeon_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_encoder *radeon_encoder;
+ enum radeon_rmx_type rmx_type;
+
+ DRM_DEBUG_KMS("\n");
+ if (property != dev->mode_config.scaling_mode_property)
+ return 0;
+
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ switch (value) {
+ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ default:
+ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ }
+ if (radeon_encoder->rmx_type == rmx_type)
+ return 0;
+
+ radeon_encoder->rmx_type = rmx_type;
+
+ radeon_property_change_mode(&radeon_encoder->base);
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+ .get_modes = radeon_lvds_get_modes,
+ .mode_valid = radeon_lvds_mode_valid,
+ .best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_connector_destroy,
+ .set_property = radeon_lvds_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+
+ return ret;
+}
+
+static int radeon_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* XXX check mode bandwidth */
+
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ bool dret = false;
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ ret = connector_status_disconnected;
+
+ if (radeon_connector->ddc_bus)
+ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+
+ if (!radeon_connector->edid) {
+ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+ drm_get_connector_name(connector));
+ ret = connector_status_connected;
+ } else {
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
+ }
+ } else {
+		/* if we aren't forcing, don't do destructive polling */
+ if (!force) {
+ /* only return the previous status if we last
+ * detected a monitor via load.
+ */
+ if (radeon_connector->detected_by_load)
+ return connector->status;
+ else
+ return ret;
+ }
+
+ if (radeon_connector->dac_load_detect && encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
+ }
+ }
+
+ if (ret == connector_status_connected)
+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the CRT is connected and use that EDID.
+ */
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ ret = connector_status_connected;
+ }
+
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+ .get_modes = radeon_vga_get_modes,
+ .mode_valid = radeon_vga_mode_valid,
+ .best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_connector_destroy,
+ .set_property = radeon_connector_set_property,
+};
+
+static int radeon_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_display_mode *tv_mode;
+ struct drm_encoder *encoder;
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ return 0;
+
+ /* avivo chips can scale any mode */
+ if (rdev->family >= CHIP_RS600)
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ else {
+ /* only 800x600 is supported right now on pre-avivo chips */
+ tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
+ tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, tv_mode);
+ }
+ return 1;
+}
+
+static int radeon_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+ return MODE_CLOCK_RANGE;
+ return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ if (!radeon_connector->dac_load_detect)
+ return ret;
+
+ encoder = radeon_best_single_encoder(connector);
+ if (!encoder)
+ ret = connector_status_disconnected;
+ else {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ if (ret == connector_status_connected)
+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+ .get_modes = radeon_tv_get_modes,
+ .mode_valid = radeon_tv_mode_valid,
+ .best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_tv_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = radeon_connector_destroy,
+ .set_property = radeon_connector_set_property,
+};
+
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ return ret;
+}
+
+static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status status;
+
+ /* We only trust HPD on R600 and newer ASICS. */
+ if (rdev->family >= CHIP_R600
+ && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+ if (connector->status == status)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * DVI is complicated
+ * Do a DDC probe, if DDC probe passes, get the full EDID so
+ * we can do analog/digital monitor detection at this point.
+ * If the monitor is an analog monitor or we got no DDC,
+ * we need to find the DAC encoder object for this connector.
+ * If we got no DDC, we do load detection on the DAC encoder object.
+ * If we got analog DDC or load detection passes on the DAC encoder
+ * we have to check if this analog encoder is shared with anyone else (TV)
+ * if its shared we have to set the other connector to disconnected.
+ */
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder = NULL;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_mode_object *obj;
+ int i;
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool dret = false, broken_edid = false;
+
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
+
+ if (radeon_connector->ddc_bus)
+ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+
+ if (!radeon_connector->edid) {
+ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+ drm_get_connector_name(connector));
+			/* rs690 seems to have a problem with connectors not existing and
+			 * always returns a block of 0s; if we see this, just stop polling
+			 * on this output */
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ ret = connector_status_disconnected;
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+ radeon_connector->ddc_bus = NULL;
+ } else {
+ ret = connector_status_connected;
+ broken_edid = true; /* defer use_digital to later */
+ }
+ } else {
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
+
+ /* This gets complicated. We have boards with VGA + HDMI with a
+ * shared DDC line and we have boards with DVI-D + HDMI with a shared
+ * DDC line. The latter is more complex because with DVI<->HDMI adapters
+ * you don't really know what's connected to which port as both are digital.
+ */
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+ if (connector == list_connector)
+ continue;
+ list_radeon_connector = to_radeon_connector(list_connector);
+ if (list_radeon_connector->shared_ddc &&
+ (list_radeon_connector->ddc_bus->rec.i2c_id ==
+ radeon_connector->ddc_bus->rec.i2c_id)) {
+ /* cases where both connectors are digital */
+ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+ /* hpd is our only option in this case */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
+ goto out;
+
+ /* DVI-D and HDMI-A are digital only */
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+ goto out;
+
+	/* if we aren't forcing, don't do destructive polling */
+ if (!force) {
+ /* only return the previous status if we last
+ * detected a monitor via load.
+ */
+ if (radeon_connector->detected_by_load)
+ ret = connector->status;
+ goto out;
+ }
+
+ /* find analog encoder */
+ if (radeon_connector->dac_load_detect) {
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->detect) {
+ if (!broken_edid) {
+ if (ret != connector_status_connected) {
+ /* deal with analog monitors without DDC */
+ ret = encoder_funcs->detect(encoder, connector);
+ if (ret == connector_status_connected) {
+ radeon_connector->use_digital = false;
+ }
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
+ }
+ } else {
+ enum drm_connector_status lret;
+ /* assume digital unless load detected otherwise */
+ radeon_connector->use_digital = true;
+ lret = encoder_funcs->detect(encoder, connector);
+				DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
+ if (lret == connector_status_connected)
+ radeon_connector->use_digital = false;
+ }
+ break;
+ }
+ }
+ }
+
+ if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
+ encoder) {
+ ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+ }
+
+ /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+ * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+ * by other means, assume the DFP is connected and use that EDID. In most
+ * cases the DVI port is actually a virtual KVM port connected to the service
+ * processor.
+ */
+out:
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+ radeon_connector->use_digital = true;
+ ret = connector_status_connected;
+ }
+
+ /* updated in get modes as well since we need to know if it's analog or digital */
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+/* okay need to be smart in here about which encoder to pick */
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+
+ if (radeon_connector->use_digital == true) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+ return encoder;
+ } else {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+ encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+ return encoder;
+ }
+ }
+
+	/* see if we have a default encoder TODO */
+
+	/* then check use digital */
+	/* pick the first one */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static void radeon_dvi_force(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->force == DRM_FORCE_ON)
+ radeon_connector->use_digital = false;
+ if (connector->force == DRM_FORCE_ON_DIGITAL)
+ radeon_connector->use_digital = true;
+}
+
+static int radeon_dvi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* XXX check mode bandwidth */
+
+ /* clocks over 135 MHz have heat issues with DVI on RV100 */
+ if (radeon_connector->use_digital &&
+ (rdev->family == CHIP_RV100) &&
+ (mode->clock > 135000))
+ return MODE_CLOCK_HIGH;
+
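+	/* 165 MHz is the single-link TMDS limit; faster modes need dual-link
+	 * DVI, HDMI type B, or HDMI 1.3+ (DCE6 only, up to 340 MHz)
+	 */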
+ if (radeon_connector->use_digital && (mode->clock > 165000)) {
+ if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+ return MODE_OK;
+ else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+ if (ASIC_IS_DCE6(rdev)) {
+				/* HDMI 1.3+ supports max clock of 340 MHz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+ } else
+ return MODE_CLOCK_HIGH;
+ } else
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* check against the max pixel clock */
+ if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+ .get_modes = radeon_dvi_get_modes,
+ .mode_valid = radeon_dvi_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dvi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ if (radeon_dig_connector->dp_i2c_bus)
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ int ret;
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct drm_display_mode *mode;
+
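+		/* an eDP panel may not answer EDID reads while powered
+		 * down, so power it on around the DDC transaction
+		 */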
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ } else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
+ ret = radeon_ddc_get_modes(radeon_connector);
+ }
+
+ if (ret > 0) {
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ }
+ return ret;
+ }
+
+ if (!encoder)
+ return 0;
+
+ /* we have no EDID modes */
+ mode = radeon_fp_native_mode(encoder);
+ if (mode) {
+ ret = 1;
+ drm_mode_probed_add(connector, mode);
+ /* add the width/height from vbios tables if available */
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ }
+ } else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
+ ret = radeon_ddc_get_modes(radeon_connector);
+ }
+
+ return ret;
+}
+
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+{
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ return radeon_encoder->encoder_id;
+ default:
+ break;
+ }
+ }
+
+ return ENCODER_OBJECT_ID_NONE;
+}
+
+bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ int i;
+ bool found = false;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+ found = true;
+ }
+
+ return found;
+}
+
+bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
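+	/* DP 1.2 (HBR2) needs DCE5+, a fast enough external DP clock and
+	 * an encoder that advertises HBR2 support
+	 */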
+ if (ASIC_IS_DCE5(rdev) &&
+ (rdev->clock.dp_extclk >= 53900) &&
+ radeon_connector_encoder_is_hbr2(connector)) {
+ return true;
+ }
+
+ return false;
+}
+
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* check if panel is valid */
+ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+ ret = connector_status_connected;
+ }
+ /* eDP is always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
+ /* DP bridges are always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ /* get the DPCD from the bridge */
+ radeon_dp_getdpcd(radeon_connector);
+
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ /* bridge chips are always aux */
+ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+ ret = connector_status_connected;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
+ } else {
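+		/* regular DP connector: the sink type decides between an AUX
+		 * DPCD probe and plain DDC for passive adapters
+		 */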
+ radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ ret = connector_status_connected;
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_dp_getdpcd(radeon_connector);
+ } else {
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+ if (radeon_ddc_probe(radeon_connector, false))
+ ret = connector_status_connected;
+ }
+ }
+ }
+
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ /* XXX check mode bandwidth */
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+ return MODE_PANEL;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* AVIVO hardware supports downscaling modes larger than the panel
+ * to the panel size, but I'm not sure this is desirable.
+ */
+ if ((mode->hdisplay > native_mode->hdisplay) ||
+ (mode->vdisplay > native_mode->vdisplay))
+ return MODE_PANEL;
+
+ /* if scaling is disabled, block non-native modes */
+ if (radeon_encoder->rmx_type == RMX_OFF) {
+ if ((mode->hdisplay != native_mode->hdisplay) ||
+ (mode->vdisplay != native_mode->vdisplay))
+ return MODE_PANEL;
+ }
+ }
+ return MODE_OK;
+ } else {
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ return radeon_dp_mode_valid_helper(connector, mode);
+ else
+ return MODE_OK;
+ }
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+ .get_modes = radeon_dp_get_modes,
+ .mode_valid = radeon_dp_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd,
+ struct radeon_router *router)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *radeon_dig_connector;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ uint32_t subpixel_order = SubPixelNone;
+ bool shared_ddc = false;
+ bool is_dp_bridge = false;
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ return;
+
+	/* if the user selected tv=0, don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
+ /* see if we already added it */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->connector_id == connector_id) {
+ radeon_connector->devices |= supported_device;
+ return;
+ }
+ if (radeon_connector->ddc_bus && i2c_bus->valid) {
+ if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+ radeon_connector->shared_ddc = true;
+ shared_ddc = true;
+ }
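+			/* a ddc bus behind a router is muxed per connector,
+			 * so it doesn't count as shared
+			 */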
+ if (radeon_connector->router_bus && router->ddc_valid &&
+ (radeon_connector->router.router_id == router->router_id)) {
+ radeon_connector->shared_ddc = false;
+ shared_ddc = false;
+ }
+ }
+ }
+
+ /* check if it's a dp bridge */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->devices & supported_device) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ is_dp_bridge = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return;
+
+ connector = &radeon_connector->base;
+
+ radeon_connector->connector_id = connector_id;
+ radeon_connector->devices = supported_device;
+ radeon_connector->shared_ddc = shared_ddc;
+ radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
+
+ radeon_connector->router = *router;
+ if (router->ddc_valid || router->cd_valid) {
+ radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+ if (!radeon_connector->router_bus)
+ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+ }
+
+ if (is_dp_bridge) {
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+ else
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ default:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ }
+ } else {
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+ case DRM_MODE_CONNECTOR_DVIA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
+ if (ASIC_IS_AVIVO(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.underscan_vborder_property,
+ 0);
+ }
+ connector->interlace_allowed = true;
+		/* doublescan might work in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ if (!radeon_dig_connector)
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+ drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ }
+ }
+
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ connector->display_info.subpixel_order = subpixel_order;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed:
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ uint32_t subpixel_order = SubPixelNone;
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+ return;
+
+	/* if the user selected tv=0, don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
+ /* see if we already added it */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->connector_id == connector_id) {
+ radeon_connector->devices |= supported_device;
+ return;
+ }
+ }
+
+ radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+ if (!radeon_connector)
+ return;
+
+ connector = &radeon_connector->base;
+
+ radeon_connector->connector_id = connector_id;
+ radeon_connector->devices = supported_device;
+ radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
+
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+ case DRM_MODE_CONNECTOR_DVIA:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+		/* The RS400, RC410 and RS480 chipsets seem to report a lot
+		 * of false positives on load detect, and we haven't yet
+		 * found a way to make load detect reliable on those
+		 * chipsets, so just disable it for TV.
+		 */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.load_detect_property,
+ radeon_connector->dac_load_detect);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+ if (!radeon_connector->ddc_bus)
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
+ }
+
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->display_info.subpixel_order = subpixel_order;
+ drm_sysfs_connector_add(connector);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
new file mode 100644
index 0000000..efc4f64
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -0,0 +1,2243 @@
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2007 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define RADEON_FIFO_DEBUG 0
+
+/* Firmware Names */
+#define FIRMWARE_R100 "radeon/R100_cp.bin"
+#define FIRMWARE_R200 "radeon/R200_cp.bin"
+#define FIRMWARE_R300 "radeon/R300_cp.bin"
+#define FIRMWARE_R420 "radeon/R420_cp.bin"
+#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
+#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
+#define FIRMWARE_R520 "radeon/R520_cp.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+
+static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
+
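+/* The ring read pointer is mirrored either into an AGP aperture map
+ * (read with DRM_READ32, which handles any needed swapping) or into
+ * plain system memory that the GPU writes little-endian, hence the
+ * explicit le32_to_cpu() on the non-AGP path.
+ */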
+u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
+{
+ u32 val;
+
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ val = DRM_READ32(dev_priv->ring_rptr, off);
+ } else {
+ val = *(((volatile u32 *)
+ dev_priv->ring_rptr->handle) +
+ (off / sizeof(u32)));
+ val = le32_to_cpu(val);
+ }
+ return val;
+}
+
+u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv)
+{
+ if (dev_priv->writeback_works)
+ return radeon_read_ring_rptr(dev_priv, 0);
+ else {
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return RADEON_READ(R600_CP_RB_RPTR);
+ else
+ return RADEON_READ(RADEON_CP_RB_RPTR);
+ }
+}
+
+void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
+{
+ if (dev_priv->flags & RADEON_IS_AGP)
+ DRM_WRITE32(dev_priv->ring_rptr, off, val);
+ else
+ *(((volatile u32 *) dev_priv->ring_rptr->handle) +
+ (off / sizeof(u32))) = cpu_to_le32(val);
+}
+
+void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val)
+{
+ radeon_write_ring_rptr(dev_priv, 0, val);
+}
+
+u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
+{
+ if (dev_priv->writeback_works) {
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return radeon_read_ring_rptr(dev_priv,
+ R600_SCRATCHOFF(index));
+ else
+ return radeon_read_ring_rptr(dev_priv,
+ RADEON_SCRATCHOFF(index));
+ } else {
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return RADEON_READ(R600_SCRATCH_REG0 + 4*index);
+ else
+ return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index);
+ }
+}
+
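+/* The memory-controller registers on these chips are not directly
+ * mapped; they are reached through an index/data pair: write the MC
+ * register address to the INDEX register, then read or write the DATA
+ * register. Each IGP/R5xx variant below has its own register pair.
+ */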
+static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+ ret = RADEON_READ(R520_MC_IND_DATA);
+ RADEON_WRITE(R520_MC_IND_INDEX, 0);
+ return ret;
+}
+
+static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
+ ret = RADEON_READ(RS480_NB_MC_DATA);
+ RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
+ return ret;
+}
+
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+ ret = RADEON_READ(RS690_MC_DATA);
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+ return ret;
+}
+
+static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) |
+ RS600_MC_IND_CITF_ARB0));
+ ret = RADEON_READ(RS600_MC_DATA);
+ return ret;
+}
+
+static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+ return RS690_READ_MCIND(dev_priv, addr);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ return RS600_READ_MCIND(dev_priv, addr);
+ else
+ return RS480_READ_MCIND(dev_priv, addr);
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+ return RADEON_READ(R700_MC_VM_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return RADEON_READ(R600_MC_VM_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+ else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+ return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+ else
+ return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+ RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+ else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+ RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+ else
+ RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+	/* R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+ RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+ RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+ RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+ RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+ else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+ RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+ else
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
+void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+ u32 agp_base_hi = upper_32_bits(agp_base);
+ u32 agp_base_lo = agp_base & 0xffffffff;
+ u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
+
+	/* on R6xx/R7xx the AGP base must be aligned to a 4MB boundary */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+ RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+ RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo);
+ RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+ R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+ RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
+ } else {
+ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+ RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+ }
+}
+
+void radeon_enable_bm(struct drm_radeon_private *dev_priv)
+{
+ u32 tmp;
+ /* Turn on bus mastering */
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+ /* rs600/rs690/rs740 */
+ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+ /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} /* PCIE cards appear not to need this */
+}
+
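+/* PLL and PCIE registers use the same index/data indirection as the
+ * MC block; only the low bits of the index register select the target
+ * (5 bits for PLL registers, 8 bits for PCIE registers, per the masks
+ * below).
+ */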
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+ return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
+}
+
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+{
+ RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
+ return RADEON_READ(RADEON_PCIE_DATA);
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status(drm_radeon_private_t * dev_priv)
+{
+ printk("%s:\n", __func__);
+ printk("RBBM_STATUS = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
+	printk("CP_RB_RPTR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
+	printk("CP_RB_WPTR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
+ printk("AIC_CNTL = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
+ printk("AIC_STAT = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_STAT));
+ printk("AIC_PT_BASE = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
+ printk("TLB_ADDR = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
+ printk("TLB_DATA = 0x%08x\n",
+ (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
+{
+ u32 tmp;
+ int i;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
+ tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
+ tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+ RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
+ & RADEON_RB3D_DC_BUSY)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ } else {
+		/* don't flush or purge the cache here or the chip may lock up */
+ return 0;
+ }
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR("failed!\n");
+ radeon_status(dev_priv);
+#endif
+ return -EBUSY;
+}
+
+static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
+{
+ int i;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ int slots = (RADEON_READ(RADEON_RBBM_STATUS)
+ & RADEON_RBBM_FIFOCNT_MASK);
+ if (slots >= entries)
+ return 0;
+ DRM_UDELAY(1);
+ }
+	DRM_DEBUG("wait for fifo failed, status: 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR("failed!\n");
+ radeon_status(dev_priv);
+#endif
+ return -EBUSY;
+}
+
+static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
+{
+ int i, ret;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ ret = radeon_do_wait_for_fifo(dev_priv, 64);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ if (!(RADEON_READ(RADEON_RBBM_STATUS)
+ & RADEON_RBBM_ACTIVE)) {
+ radeon_do_pixcache_flush(dev_priv);
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+	DRM_DEBUG("wait idle failed, status: 0x%08X 0x%08X\n",
+ RADEON_READ(RADEON_RBBM_STATUS),
+ RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+ DRM_ERROR("failed!\n");
+ radeon_status(dev_priv);
+#endif
+ return -EBUSY;
+}
+
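+/* Work out how many raster (GB) pipes and Z pipes the chip has and
+ * program the tiling config to match. On R420 and newer the pipe
+ * count comes from GB_PIPE_SELECT (bits 12-13 hold pipes - 1); older
+ * R3xx parts are identified by family and PCI device ID.
+ */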
+static void radeon_init_pipes(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ uint32_t gb_tile_config, gb_pipe_sel = 0;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
+ uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
+ if ((z_pipe_sel & 3) == 3)
+ dev_priv->num_z_pipes = 2;
+ else
+ dev_priv->num_z_pipes = 1;
+ } else
+ dev_priv->num_z_pipes = 1;
+
+ /* RS4xx/RS6xx/R4xx/R5xx */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
+ gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
+ dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+ /* SE cards have 1 pipe */
+ if ((dev->pdev->device == 0x5e4c) ||
+ (dev->pdev->device == 0x5e4f))
+ dev_priv->num_gb_pipes = 1;
+ } else {
+ /* R3xx */
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+ dev->pdev->device != 0x4144) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+ dev->pdev->device != 0x4148)) {
+ dev_priv->num_gb_pipes = 2;
+ } else {
+ /* RV3xx/R300 AD/R350 AH */
+ dev_priv->num_gb_pipes = 1;
+ }
+ }
+ DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
+
+ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
+
+ switch (dev_priv->num_gb_pipes) {
+ case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
+ case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
+ case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
+ default:
+ case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
+ RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
+ }
+ RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
+ radeon_do_wait_for_idle(dev_priv);
+ RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
+ RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
+ R300_DC_AUTOFLUSH_ENABLE |
+ R300_DC_DC_DISABLE_IGNORE_PE));
+}
+
+/* ================================================================
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+ struct platform_device *pdev;
+ const char *fw_name = NULL;
+ int err;
+
+ DRM_DEBUG("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
+ DRM_INFO("Loading R100 Microcode\n");
+ fw_name = FIRMWARE_R100;
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
+ DRM_INFO("Loading R200 Microcode\n");
+ fw_name = FIRMWARE_R200;
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+ DRM_INFO("Loading R300 Microcode\n");
+ fw_name = FIRMWARE_R300;
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
+ DRM_INFO("Loading R400 Microcode\n");
+ fw_name = FIRMWARE_R420;
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+ DRM_INFO("Loading RS690/RS740 Microcode\n");
+ fw_name = FIRMWARE_RS690;
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+ DRM_INFO("Loading RS600 Microcode\n");
+ fw_name = FIRMWARE_RS600;
+ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
+ DRM_INFO("Loading R500 Microcode\n");
+ fw_name = FIRMWARE_R520;
+ }
+
+ err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
+ platform_device_unregister(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else if (dev_priv->me_fw->size % 8) {
+ printk(KERN_ERR
+ "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+ dev_priv->me_fw->size, fw_name);
+ err = -EINVAL;
+ release_firmware(dev_priv->me_fw);
+ dev_priv->me_fw = NULL;
+ }
+ return err;
+}
+
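+/* Upload the CP microcode into the ME RAM. The firmware image is an
+ * array of big-endian 32-bit words consumed in high/low pairs; the
+ * RAM address register is written once and appears to auto-increment
+ * as the data register pairs are written.
+ */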
+static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+ const __be32 *fw_data;
+ int i, size;
+
+ radeon_do_wait_for_idle(dev_priv);
+
+ if (dev_priv->me_fw) {
+ size = dev_priv->me_fw->size / 4;
+ fw_data = (const __be32 *)&dev_priv->me_fw->data[0];
+ RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
+ for (i = 0; i < size; i += 2) {
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+ be32_to_cpup(&fw_data[i]));
+ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+ be32_to_cpup(&fw_data[i + 1]));
+ }
+ }
+}
+
+/* Flush any pending commands to the CP. This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
+{
+ DRM_DEBUG("\n");
+#if 0
+ u32 tmp;
+
+ tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
+#endif
+}
+
+/* Wait for the CP to go idle.
+ */
+int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(6);
+
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return radeon_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ radeon_do_wait_for_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
+
+ dev_priv->cp_running = 1;
+
+	/* On r420, any DMA from the CP to system memory while 2D is
+	 * active can cause a hang; the workaround is to queue a
+	 * CP RESYNC token.
+	 */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+ BEGIN_RING(3);
+ OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
+ OUT_RING(5); /* scratch reg 5 */
+ OUT_RING(0xdeadbeef);
+ ADVANCE_RING();
+ COMMIT_RING();
+ }
+
+ BEGIN_RING(8);
+	/* ISYNC can only be written through the CP on r5xx, so write it here */
+ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+ OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+ RADEON_PURGE_CACHE();
+ RADEON_PURGE_ZCACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/* Reset the Command Processor. This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
+{
+ u32 cur_read_ptr;
+ DRM_DEBUG("\n");
+
+ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+ SET_RING_HEAD(dev_priv, cur_read_ptr);
+ dev_priv->ring.tail = cur_read_ptr;
+}
+
+/* Stop the Command Processor. This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
+{
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ /* finish the pending CP_RESYNC token */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_DC_FINISH);
+ ADVANCE_RING();
+ COMMIT_RING();
+ radeon_do_wait_for_idle(dev_priv);
+ }
+
+ RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
+
+ dev_priv->cp_running = 0;
+}
+
+/* Reset the engine. This will stop the CP if it is running.
+ */
+static int radeon_do_engine_reset(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
+ DRM_DEBUG("\n");
+
+ radeon_do_pixcache_flush(dev_priv);
+
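+	/* keep the memory and AIC clocks forced on while the blocks are
+	 * soft-reset; the saved values are restored afterwards */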
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+ /* may need something similar for newer chips */
+ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+ RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC |
+ RADEON_FORCEON_AIC));
+ }
+
+ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+ RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+ ~(RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB)));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+ }
+
+ /* setup the raster pipes */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
+ radeon_init_pipes(dev);
+
+ /* Reset the CP ring */
+ radeon_do_cp_reset(dev_priv);
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ /* Reset any pending vertex, indirect buffers */
+ radeon_freelist_reset(dev);
+
+ return 0;
+}
+
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
+ drm_radeon_private_t *dev_priv,
+ struct drm_file *file_priv)
+{
+ struct drm_radeon_master_private *master_priv;
+ u32 ring_start, cur_read_ptr;
+
+	/* Initialize the memory controller. With the new memory map, the fb
+	 * location is not changed; it should have been properly initialized
+	 * already. Part of the problem is that the code below is bogus: it
+	 * assumes the GART is always appended to the fb, which is not
+	 * necessarily the case.
+	 */
+ if (!dev_priv->new_memmap)
+ radeon_write_fb_location(dev_priv,
+ ((dev_priv->gart_vm_start - 1) & 0xffff0000)
+ | (dev_priv->fb_location >> 16));
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ radeon_write_agp_base(dev_priv, dev->agp->base);
+
+ radeon_write_agp_location(dev_priv,
+ (((dev_priv->gart_vm_start - 1 +
+ dev_priv->gart_size) & 0xffff0000) |
+ (dev_priv->gart_vm_start >> 16)));
+
+ ring_start = (dev_priv->cp_ring->offset
+ - dev->agp->base
+ + dev_priv->gart_vm_start);
+ } else
+#endif
+ ring_start = (dev_priv->cp_ring->offset
+ - (unsigned long)dev->sg->virtual
+ + dev_priv->gart_vm_start);
+
+ RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
+
+ /* Set the write pointer delay */
+ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+ SET_RING_HEAD(dev_priv, cur_read_ptr);
+ dev_priv->ring.tail = cur_read_ptr;
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+ dev_priv->ring_rptr->offset
+ - dev->agp->base + dev_priv->gart_vm_start);
+ } else
+#endif
+ {
+ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+ dev_priv->ring_rptr->offset
+ - ((unsigned long) dev->sg->virtual)
+ + dev_priv->gart_vm_start);
+ }
+
+ /* Set ring buffer size */
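+	/* (the size fields are log2 values: ring size in quadwords,
+	 * rptr writeback interval in quadwords and fetch size in
+	 * octwords, going by the field names; BUF_SWAP_32BIT makes the
+	 * CP byte-swap its ring reads on big-endian hosts) */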
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ RADEON_BUF_SWAP_32BIT |
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#else
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
+#endif
+
+ /* Initialize the scratch register pointer. This will cause
+ * the scratch register values to be written out to memory
+ * whenever they are updated.
+ *
+	 * We simply put this behind the ring read pointer; this works
+	 * with PCI GART as well as (whatever kind of) AGP GART.
+ */
+ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+ + RADEON_SCRATCH_REG_OFFSET);
+
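+	/* 0x7 enables memory writeback for scratch registers 0-2, which
+	 * hold the last-frame, last-dispatch and last-clear ages below */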
+ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
+
+ radeon_enable_bm(dev_priv);
+
+ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
+ RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
+
+ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+ RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
+
+ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
+ RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
+
+ /* reset sarea copies of these */
+ master_priv = file_priv->master->driver_priv;
+ if (master_priv->sarea_priv) {
+ master_priv->sarea_priv->last_frame = 0;
+ master_priv->sarea_priv->last_dispatch = 0;
+ master_priv->sarea_priv->last_clear = 0;
+ }
+
+ radeon_do_wait_for_idle(dev_priv);
+
+ /* Sync everything up */
+ RADEON_WRITE(RADEON_ISYNC_CNTL,
+ (RADEON_ISYNC_ANY2D_IDLE3D |
+ RADEON_ISYNC_ANY3D_IDLE2D |
+ RADEON_ISYNC_WAIT_IDLEGUI |
+ RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+}
+
+static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
+{
+ u32 tmp;
+
+ /* Start with assuming that writeback doesn't work */
+ dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere; test it here and
+	 * enable it only if it appears to work.
+	 */
+ radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+
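+	/* write a magic value through MMIO and poll its memory mirror;
+	 * if the MC copies it back, writeback works */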
+ RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
+ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+ u32 val;
+
+ val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
+ if (val == 0xdeadbeef)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (tmp < dev_priv->usec_timeout) {
+ dev_priv->writeback_works = 1;
+ DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+ } else {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback test failed\n");
+ }
+ if (radeon_no_wb == 1) {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback forced off\n");
+ }
+
+ if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfers */
+ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
+ RADEON_RB_NO_UPDATE);
+ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
+ }
+}
+
+/* Enable or disable IGP GART on the chip */
+static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
+{
+ u32 temp;
+
+ if (on) {
+ DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+
+ temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
+ RS690_BLOCK_GFX_D3_EN));
+ else
+ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+
+ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+ RS480_VA_SIZE_32MB));
+
+ temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
+ IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
+ RS480_TLB_ENABLE |
+ RS480_GTW_LAC_EN |
+ RS480_1LEVEL_GART));
+
+ temp = dev_priv->gart_info.bus_addr & 0xfffff000;
+ temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
+ IGP_WRITE_MCIND(RS480_GART_BASE, temp);
+
+ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
+ IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
+ RS480_REQ_TYPE_SNOOP_DIS));
+
+ radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
+
+ dev_priv->gart_size = 32*1024*1024;
+ temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+ 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+
+ radeon_write_agp_location(dev_priv, temp);
+
+ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
+ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+ RS480_VA_SIZE_32MB));
+
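+		/* flush the GART TLB: wait for any pending invalidate, request
+		 * one, then wait again (the invalidate bit appears to self-clear
+		 * when the flush completes) before clearing the request */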
+ do {
+ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+ break;
+ DRM_UDELAY(1);
+ } while (1);
+
+ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
+ RS480_GART_CACHE_INVALIDATE);
+
+ do {
+ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+ break;
+ DRM_UDELAY(1);
+ } while (1);
+
+ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
+ } else {
+ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+ }
+}
+
+/* Enable or disable IGP GART on the chip */
+static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
+{
+ u32 temp;
+ int i;
+
+ if (on) {
+ DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
+ RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
+ for (i = 0; i < 19; i++)
+ IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
+ (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
+ RS600_SYSTEM_ACCESS_MODE_IN_SYS |
+ RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
+ RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
+ RS600_ENABLE_FRAGMENT_PROCESSING |
+ RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
+ RS600_PAGE_TABLE_TYPE_FLAT));
+
+ /* disable all other contexts */
+ for (i = 1; i < 8; i++)
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+ /* setup the page table aperture */
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+ dev_priv->gart_info.bus_addr);
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
+ dev_priv->gart_vm_start);
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
+ (dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+ IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+ /* setup the system aperture */
+ IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
+ dev_priv->gart_vm_start);
+ IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
+ (dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+
+ /* enable page tables */
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));
+
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+ IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));
+
+ /* invalidate the cache */
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+ temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+ temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+ temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+ } else {
+ IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
+ temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+ temp &= ~RS600_ENABLE_PAGE_TABLES;
+ IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
+ }
+}
+
+static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
+{
+ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+ if (on) {
+
+ DRM_DEBUG("programming pcie %08X %08lX %08X\n",
+ dev_priv->gart_vm_start,
+ (long)dev_priv->gart_info.bus_addr,
+ dev_priv->gart_size);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
+ dev_priv->gart_vm_start);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
+ dev_priv->gart_info.bus_addr);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
+ dev_priv->gart_vm_start);
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
+ dev_priv->gart_vm_start +
+ dev_priv->gart_size - 1);
+
+ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
+
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+ RADEON_PCIE_TX_GART_EN);
+ } else {
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+ tmp & ~RADEON_PCIE_TX_GART_EN);
+ }
+}
+
+/* Enable or disable PCI GART on the chip */
+static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
+{
+ u32 tmp;
+
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
+ (dev_priv->flags & RADEON_IS_IGPGART)) {
+ radeon_set_igpgart(dev_priv, on);
+ return;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+ rs600_set_igpgart(dev_priv, on);
+ return;
+ }
+
+ if (dev_priv->flags & RADEON_IS_PCIE) {
+ radeon_set_pciegart(dev_priv, on);
+ return;
+ }
+
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
+
+ if (on) {
+ RADEON_WRITE(RADEON_AIC_CNTL,
+ tmp | RADEON_PCIGART_TRANSLATE_EN);
+
+ /* set PCI GART page-table base address
+ */
+ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
+
+ /* set address range for PCI address translate
+ */
+ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
+ RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
+ + dev_priv->gart_size - 1);
+
+ /* Turn off AGP aperture -- is this required for PCI GART?
+ */
+ radeon_write_agp_location(dev_priv, 0xffffffc0);
+ RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
+ } else {
+ RADEON_WRITE(RADEON_AIC_CNTL,
+ tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+ }
+}
+
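+/* Claim a hardware surface covering the PCI GART table with no swap
+ * flags so the memory controller leaves the table's byte order alone;
+ * the surface registers control per-range byte swapping, which
+ * matters on big-endian hosts.
+ */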
+static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv)
+{
+ struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+ struct radeon_virt_surface *vp;
+ int i;
+
+ for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) {
+ if (!dev_priv->virt_surfaces[i].file_priv ||
+ dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV)
+ break;
+ }
+ if (i >= 2 * RADEON_MAX_SURFACES)
+ return -ENOMEM;
+ vp = &dev_priv->virt_surfaces[i];
+
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ struct radeon_surface *sp = &dev_priv->surfaces[i];
+ if (sp->refcount)
+ continue;
+
+ vp->surface_index = i;
+ vp->lower = gart_info->bus_addr;
+ vp->upper = vp->lower + gart_info->table_size;
+ vp->flags = 0;
+ vp->file_priv = PCIGART_FILE_PRIV;
+
+ sp->refcount = 1;
+ sp->lower = vp->lower;
+ sp->upper = vp->upper;
+ sp->flags = 0;
+
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper);
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+ struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+ DRM_DEBUG("\n");
+
+	/* if we require the new memory map but don't have it, fail */
+ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+ DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+ DRM_DEBUG("Forcing AGP card to PCI mode\n");
+ dev_priv->flags &= ~RADEON_IS_AGP;
+ } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+ && !init->is_pci) {
+ DRM_DEBUG("Restoring AGP flag\n");
+ dev_priv->flags |= RADEON_IS_AGP;
+ }
+
+ if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
+ DRM_ERROR("PCI GART memory not allocated!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->usec_timeout = init->usec_timeout;
+ if (dev_priv->usec_timeout < 1 ||
+ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+ DRM_DEBUG("TIMEOUT problem!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ /* Enable vblank on CRTC1 for older X servers
+ */
+ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
+ switch(init->func) {
+ case RADEON_INIT_R200_CP:
+ dev_priv->microcode_version = UCODE_R200;
+ break;
+ case RADEON_INIT_R300_CP:
+ dev_priv->microcode_version = UCODE_R300;
+ break;
+ default:
+ dev_priv->microcode_version = UCODE_R100;
+ }
+
+ dev_priv->do_boxes = 0;
+ dev_priv->cp_mode = init->cp_mode;
+
+ /* We only support bus-mastering ring mode, but the ring read
+ * pointer can live in either AGP or PCI space.
+ */
+ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+ (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+ DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ switch (init->fb_bpp) {
+ case 16:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+ break;
+ case 32:
+ default:
+ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+ break;
+ }
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->front_pitch = init->front_pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->back_pitch = init->back_pitch;
+
+ switch (init->depth_bpp) {
+ case 16:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+ break;
+ case 32:
+ default:
+ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+ break;
+ }
+ dev_priv->depth_offset = init->depth_offset;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ /* Hardware state for depth clears. Remove this if/when we no
+ * longer clear the depth buffer with a 3D rectangle. Hard-code
+ * all values to prevent unwanted 3D state from slipping through
+ * and screwing with the clear operation.
+ */
+ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+ (dev_priv->color_fmt << 10) |
+ (dev_priv->microcode_version ==
+ UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+
+ dev_priv->depth_clear.rb3d_zstencilcntl =
+ (dev_priv->depth_fmt |
+ RADEON_Z_TEST_ALWAYS |
+ RADEON_STENCIL_TEST_ALWAYS |
+ RADEON_STENCIL_S_FAIL_REPLACE |
+ RADEON_STENCIL_ZPASS_REPLACE |
+ RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
+
+ dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+ RADEON_BFACE_SOLID |
+ RADEON_FFACE_SOLID |
+ RADEON_FLAT_SHADE_VTX_LAST |
+ RADEON_DIFFUSE_SHADE_FLAT |
+ RADEON_ALPHA_SHADE_FLAT |
+ RADEON_SPECULAR_SHADE_FLAT |
+ RADEON_FOG_SHADE_FLAT |
+ RADEON_VTX_PIX_CENTER_OGL |
+ RADEON_ROUND_MODE_TRUNC |
+ RADEON_ROUND_PREC_8TH_PIX);
+
+
+ dev_priv->ring_offset = init->ring_offset;
+ dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+ dev_priv->buffers_offset = init->buffers_offset;
+ dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+ master_priv->sarea = drm_getsarea(dev);
+ if (!master_priv->sarea) {
+ DRM_ERROR("could not find sarea!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+ if (!dev_priv->cp_ring) {
+ DRM_ERROR("could not find cp ring region!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+ if (!dev_priv->ring_rptr) {
+ DRM_ERROR("could not find ring read pointer!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ dev->agp_buffer_token = init->buffers_offset;
+ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+ if (!dev->agp_buffer_map) {
+ DRM_ERROR("could not find dma buffer region!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+
+ if (init->gart_textures_offset) {
+ dev_priv->gart_textures =
+ drm_core_findmap(dev, init->gart_textures_offset);
+ if (!dev_priv->gart_textures) {
+ DRM_ERROR("could not find GART texture region!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ }
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+ drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+ drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+ if (!dev_priv->cp_ring->handle ||
+ !dev_priv->ring_rptr->handle ||
+ !dev->agp_buffer_map->handle) {
+ DRM_ERROR("could not find ioremap agp regions!\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ } else
+#endif
+ {
+ dev_priv->cp_ring->handle =
+ (void *)(unsigned long)dev_priv->cp_ring->offset;
+ dev_priv->ring_rptr->handle =
+ (void *)(unsigned long)dev_priv->ring_rptr->offset;
+ dev->agp_buffer_map->handle =
+ (void *)(unsigned long)dev->agp_buffer_map->offset;
+
+ DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+ dev_priv->cp_ring->handle);
+ DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+ dev_priv->ring_rptr->handle);
+ DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+ dev->agp_buffer_map->handle);
+ }
+
+ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+ dev_priv->fb_size =
+ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
+ - dev_priv->fb_location;
+
+ dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+ ((dev_priv->front_offset
+ + dev_priv->fb_location) >> 10));
+
+ dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+ ((dev_priv->back_offset
+ + dev_priv->fb_location) >> 10));
+
+ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+ ((dev_priv->depth_offset
+ + dev_priv->fb_location) >> 10));
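+
+ /* Editor's note: each *_pitch_offset word above packs two fields:
+ * the pitch in 64-byte units above bit 22, and the card-relative
+ * offset in 1 KiB units in bits 21:0. Worked example with assumed
+ * values pitch = 8192 and offset + fb_location = 0x10000:
+ *
+ *	(8192 / 64) << 22 = 0x80 << 22 = 0x20000000
+ *	0x10000 >> 10                  = 0x00000040
+ *	packed word                    = 0x20000040
+ */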
+
+ dev_priv->gart_size = init->gart_size;
+
+ /* Now let's set the memory map ... */
+ if (dev_priv->new_memmap) {
+ u32 base = 0;
+
+ DRM_INFO("Setting GART location based on new memory map\n");
+
+ /* If using AGP, try to locate the AGP aperture at the same
+ * location in the card and on the bus, though we have to
+ * align it down.
+ */
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ base = dev->agp->base;
+ /* Check if valid */
+ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+ DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+ dev->agp->base);
+ base = 0;
+ }
+ }
+#endif
+ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+ if (base == 0) {
+ base = dev_priv->fb_location + dev_priv->fb_size;
+ if (base < dev_priv->fb_location ||
+ ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+ base = dev_priv->fb_location
+ - dev_priv->gart_size;
+ }
+ dev_priv->gart_vm_start = base & 0xffc00000u;
+ if (dev_priv->gart_vm_start != base)
+ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+ base, dev_priv->gart_vm_start);
+ } else {
+ DRM_INFO("Setting GART location based on old memory map\n");
+ dev_priv->gart_vm_start = dev_priv->fb_location +
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
+ }
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP)
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+ - dev->agp->base
+ + dev_priv->gart_vm_start);
+ else
+#endif
+ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+ - (unsigned long)dev->sg->virtual
+ + dev_priv->gart_vm_start);
+
+ DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+ DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
+ DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
+ dev_priv->gart_buffers_offset);
+
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ + init->ring_size / sizeof(u32));
+ dev_priv->ring.size = init->ring_size;
+ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+ dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+ dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+ dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
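+
+ /* Editor's note: drm_order() is log2 for the power-of-two sizes used
+ * here, so the *_l2qw fields hold the log2 of a size expressed in
+ * quad-words (8 bytes). Example, assuming a 1 MiB ring:
+ *
+ *	size_l2qw        = log2(1048576 / 8) = 17
+ *	rptr_update_l2qw = log2(4096 / 8)    = 9
+ *	tail_mask        = 1048576 / 4 - 1   = 0x3ffff  (dword index mask)
+ */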
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ } else
+#endif
+ {
+ u32 sctrl;
+ int ret;
+
+ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+ /* if we have an offset set from userspace */
+ if (dev_priv->pcigart_offset_set) {
+ dev_priv->gart_info.bus_addr =
+ (resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
+ dev_priv->gart_info.mapping.offset =
+ dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+ dev_priv->gart_info.mapping.size =
+ dev_priv->gart_info.table_size;
+
+ drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+ dev_priv->gart_info.addr =
+ dev_priv->gart_info.mapping.handle;
+
+ if (dev_priv->flags & RADEON_IS_PCIE)
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
+ else
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+ dev_priv->gart_info.gart_table_location =
+ DRM_ATI_GART_FB;
+
+ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+ dev_priv->gart_info.addr,
+ dev_priv->pcigart_offset);
+ } else {
+ if (dev_priv->flags & RADEON_IS_IGPGART)
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
+ else
+ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+ dev_priv->gart_info.gart_table_location =
+ DRM_ATI_GART_MAIN;
+ dev_priv->gart_info.addr = NULL;
+ dev_priv->gart_info.bus_addr = 0;
+ if (dev_priv->flags & RADEON_IS_PCIE) {
+ DRM_ERROR
+ ("Cannot use PCI Express without GART in FB memory\n");
+ radeon_do_cleanup_cp(dev);
+ return -EINVAL;
+ }
+ }
+
+ sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
+ RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ ret = r600_page_table_init(dev);
+ else
+ ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
+ RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);
+
+ if (!ret) {
+ DRM_ERROR("failed to init PCI GART!\n");
+ radeon_do_cleanup_cp(dev);
+ return -ENOMEM;
+ }
+
+ ret = radeon_setup_pcigart_surface(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to setup GART surface!\n");
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ r600_page_table_cleanup(dev, &dev_priv->gart_info);
+ else
+ drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
+ radeon_do_cleanup_cp(dev);
+ return ret;
+ }
+
+ /* Turn on PCI GART */
+ radeon_set_pcigart(dev_priv, 1);
+ }
+
+ if (!dev_priv->me_fw) {
+ int err = radeon_cp_init_microcode(dev_priv);
+ if (err) {
+ DRM_ERROR("Failed to load firmware!\n");
+ radeon_do_cleanup_cp(dev);
+ return err;
+ }
+ }
+ radeon_cp_load_microcode(dev_priv);
+ radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+ dev_priv->last_buf = 0;
+
+ radeon_do_engine_reset(dev);
+ radeon_test_writeback(dev_priv);
+
+ return 0;
+}
+
+static int radeon_do_cleanup_cp(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ /* Make sure interrupts are disabled here because the uninstall ioctl
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ if (dev_priv->cp_ring != NULL) {
+ drm_core_ioremapfree(dev_priv->cp_ring, dev);
+ dev_priv->cp_ring = NULL;
+ }
+ if (dev_priv->ring_rptr != NULL) {
+ drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+ dev_priv->ring_rptr = NULL;
+ }
+ if (dev->agp_buffer_map != NULL) {
+ drm_core_ioremapfree(dev->agp_buffer_map, dev);
+ dev->agp_buffer_map = NULL;
+ }
+ } else
+#endif
+ {
+
+ if (dev_priv->gart_info.bus_addr) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+ r600_page_table_cleanup(dev, &dev_priv->gart_info);
+ else {
+ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
+ DRM_ERROR("failed to cleanup PCI GART!\n");
+ }
+ }
+
+ if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+ {
+ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+ dev_priv->gart_info.addr = NULL;
+ }
+ }
+ /* only clear to the start of flags */
+ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+ return 0;
+}
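+
+/* Editor's note: the partial memset above is the standard idiom for
+ * wiping everything that precedes a given field while preserving that
+ * field and whatever is laid out after it -- here the probe-time chip
+ * flags survive the CP teardown:
+ *
+ *	memset(ptr, 0, offsetof(struct_type, first_field_to_keep));
+ */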
+
+/* This code will reinit the Radeon CP hardware after a resume from disk.
+ * AFAIK, it would be very difficult to pickle the state at suspend time, so
+ * here we make sure that all Radeon hardware initialisation is re-done without
+ * affecting running applications.
+ *
+ * Charl P. Botha <http://cpbotha.net>
+ */
+static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv) {
+ DRM_ERROR("Called with no initialization\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("Starting radeon_do_resume_cp()\n");
+
+#if __OS_HAS_AGP
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
+ } else
+#endif
+ {
+ /* Turn on PCI GART */
+ radeon_set_pcigart(dev_priv, 1);
+ }
+
+ radeon_cp_load_microcode(dev_priv);
+ radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+ dev_priv->have_z_offset = 0;
+ radeon_do_engine_reset(dev);
+ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+ DRM_DEBUG("radeon_do_resume_cp() complete\n");
+
+ return 0;
+}
+
+int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_init_t *init = data;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (init->func == RADEON_INIT_R300_CP)
+ r300_init_reg_flags(dev);
+
+ switch (init->func) {
+ case RADEON_INIT_CP:
+ case RADEON_INIT_R200_CP:
+ case RADEON_INIT_R300_CP:
+ return radeon_do_init_cp(dev, init, file_priv);
+ case RADEON_INIT_R600_CP:
+ return r600_do_init_cp(dev, init, file_priv);
+ case RADEON_CLEANUP_CP:
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return r600_do_cleanup_cp(dev);
+ else
+ return radeon_do_cleanup_cp(dev);
+ }
+
+ return -EINVAL;
+}
+
+int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (dev_priv->cp_running) {
+ DRM_DEBUG("while CP running\n");
+ return 0;
+ }
+ if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
+ DRM_DEBUG("called with bogus CP mode (%d)\n",
+ dev_priv->cp_mode);
+ return 0;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_do_cp_start(dev_priv);
+ else
+ radeon_do_cp_start(dev_priv);
+
+ return 0;
+}
+
+/* Stop the CP. The engine must have been idled before calling this
+ * routine.
+ */
+int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_cp_stop_t *stop = data;
+ int ret;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (!dev_priv->cp_running)
+ return 0;
+
+ /* Flush any pending CP commands. This ensures any outstanding
+ * commands are executed by the engine before we turn it off.
+ */
+ if (stop->flush) {
+ radeon_do_cp_flush(dev_priv);
+ }
+
+ /* If we fail to make the engine go idle, we return an error
+ * code so that the DRM ioctl wrapper can try again.
+ */
+ if (stop->idle) {
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ ret = r600_do_cp_idle(dev_priv);
+ else
+ ret = radeon_do_cp_idle(dev_priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Finally, we can turn off the CP. If the engine isn't idle,
+ * we will get some dropped triangles as they won't be fully
+ * rendered before the CP is shut down.
+ */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_do_cp_stop(dev_priv);
+ else
+ radeon_do_cp_stop(dev_priv);
+
+ /* Reset the engine */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_do_engine_reset(dev);
+ else
+ radeon_do_engine_reset(dev);
+
+ return 0;
+}
+
+void radeon_do_release(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+ if (dev_priv) {
+ if (dev_priv->cp_running) {
+ /* Stop the cp */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+ while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+ }
+ } else {
+ while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+ }
+ }
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+ r600_do_cp_stop(dev_priv);
+ r600_do_engine_reset(dev);
+ } else {
+ radeon_do_cp_stop(dev_priv);
+ radeon_do_engine_reset(dev);
+ }
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
+ /* Disable *all* interrupts */
+ if (dev_priv->mmio) /* remove this after permanent addmaps */
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+ if (dev_priv->mmio) { /* remove all surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
+ 16 * i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
+ 16 * i, 0);
+ }
+ }
+ }
+
+ /* Free memory heap structures */
+ radeon_mem_takedown(&(dev_priv->gart_heap));
+ radeon_mem_takedown(&(dev_priv->fb_heap));
+
+ /* deallocate kernel resources */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_do_cleanup_cp(dev);
+ else
+ radeon_do_cleanup_cp(dev);
+ release_firmware(dev_priv->me_fw);
+ dev_priv->me_fw = NULL;
+ release_firmware(dev_priv->pfp_fw);
+ dev_priv->pfp_fw = NULL;
+ }
+}
+
+/* Just reset the CP ring. Called as part of an X Server engine reset.
+ */
+int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (!dev_priv) {
+ DRM_DEBUG("called before init done\n");
+ return -EINVAL;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_do_cp_reset(dev_priv);
+ else
+ radeon_do_cp_reset(dev_priv);
+
+ /* The CP is no longer running after an engine reset */
+ dev_priv->cp_running = 0;
+
+ return 0;
+}
+
+int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return r600_do_cp_idle(dev_priv);
+ else
+ return radeon_do_cp_idle(dev_priv);
+}
+
+/* Added by Charl P. Botha to call radeon_do_resume_cp().
+ */
+int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return r600_do_resume_cp(dev, file_priv);
+ else
+ return radeon_do_resume_cp(dev, file_priv);
+}
+
+int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return r600_do_engine_reset(dev);
+ else
+ return radeon_do_engine_reset(dev);
+}
+
+/* ================================================================
+ * Fullscreen mode
+ */
+
+/* KW: Deprecated to say the least:
+ */
+int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
+ * bufs until freelist code is used. Note this hides a problem with
+ * the scratch register (used to keep track of the last buffer
+ * completed) being written to before the last buffer has actually
+ * completed rendering.
+ *
+ * KW: It's also a good way to find free buffers quickly.
+ *
+ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
+ * sleep. However, bugs in older versions of radeon_accel.c mean that
+ * we essentially have to do this, else old clients will break.
+ *
+ * However, it does leave open a potential deadlock where all the
+ * buffers are held by other clients, which can't release them because
+ * they can't get the lock.
+ */
+
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+{
+ struct drm_device_dma *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_buf_priv_t *buf_priv;
+ struct drm_buf *buf;
+ int i, t;
+ int start;
+
+ if (++dev_priv->last_buf >= dma->buf_count)
+ dev_priv->last_buf = 0;
+
+ start = dev_priv->last_buf;
+
+ for (t = 0; t < dev_priv->usec_timeout; t++) {
+ u32 done_age = GET_SCRATCH(dev_priv, 1);
+ DRM_DEBUG("done_age = %d\n", done_age);
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[start];
+ buf_priv = buf->dev_private;
+ if (buf->file_priv == NULL || (buf->pending &&
+ buf_priv->age <=
+ done_age)) {
+ dev_priv->stats.requested_bufs++;
+ buf->pending = 0;
+ return buf;
+ }
+ if (++start >= dma->buf_count)
+ start = 0;
+ }
+
+ if (t) {
+ DRM_UDELAY(1);
+ dev_priv->stats.freelist_loops++;
+ }
+ }
+
+ return NULL;
+}
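+
+/* Editor's note: buffer reuse above is age-driven. Each submitted
+ * buffer is stamped with an increasing age, and the CP writes the age
+ * of the last completed buffer into scratch register 1; a buffer is
+ * free once it is unowned, or pending with a stamp no newer than that
+ * scratch value. The test in sketch form:
+ *
+ *	free = (buf->file_priv == NULL) ||
+ *	       (buf->pending && buf_priv->age <= GET_SCRATCH(dev_priv, 1));
+ */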
+
+void radeon_freelist_reset(struct drm_device * dev)
+{
+ struct drm_device_dma *dma = dev->dma;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->last_buf = 0;
+ for (i = 0; i < dma->buf_count; i++) {
+ struct drm_buf *buf = dma->buflist[i];
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ buf_priv->age = 0;
+ }
+}
+
+/* ================================================================
+ * CP command submission
+ */
+
+int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
+{
+ drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+ int i;
+ u32 last_head = GET_RING_HEAD(dev_priv);
+
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ u32 head = GET_RING_HEAD(dev_priv);
+
+ ring->space = (head - ring->tail) * sizeof(u32);
+ if (ring->space <= 0)
+ ring->space += ring->size;
+ if (ring->space > n)
+ return 0;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ if (head != last_head)
+ i = 0;
+ last_head = head;
+
+ DRM_UDELAY(1);
+ }
+
+ /* FIXME: This return value is ignored in the BEGIN_RING macro! */
+#if RADEON_FIFO_DEBUG
+ radeon_status(dev_priv);
+ DRM_ERROR("failed!\n");
+#endif
+ return -EBUSY;
+}
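+
+/* Editor's note: the space computation above is standard circular-
+ * buffer arithmetic on dword indices. Example, assuming a 64 KiB ring
+ * with head = 0x100 and tail = 0x180:
+ *
+ *	space = (0x100 - 0x180) * 4 = -512    (head is behind tail)
+ *	space += 65536              = 65024 bytes available
+ *
+ * The strict `>` in the space check keeps at least one dword in hand,
+ * so a completely full ring is never mistaken for an empty one.
+ */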
+
+static int radeon_cp_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_dma * d)
+{
+ int i;
+ struct drm_buf *buf;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = radeon_freelist_get(dev);
+ if (!buf)
+ return -EBUSY; /* NOTE: broken client */
+
+ buf->file_priv = file_priv;
+
+ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+ sizeof(buf->idx)))
+ return -EFAULT;
+ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+ sizeof(buf->total)))
+ return -EFAULT;
+
+ d->granted_count++;
+ }
+ return 0;
+}
+
+int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_device_dma *dma = dev->dma;
+ int ret = 0;
+ struct drm_dma *d = data;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ /* Please don't send us buffers.
+ */
+ if (d->send_count != 0) {
+ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
+ }
+
+ /* We'll send you buffers.
+ */
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
+ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ return -EINVAL;
+ }
+
+ d->granted_count = 0;
+
+ if (d->request_count) {
+ ret = radeon_cp_get_buffers(dev, file_priv, d);
+ }
+
+ return ret;
+}
+
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ drm_radeon_private_t *dev_priv;
+ int ret = 0;
+
+ dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->flags = flags;
+
+ switch (flags & RADEON_FAMILY_MASK) {
+ case CHIP_R100:
+ case CHIP_RV200:
+ case CHIP_R200:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV570:
+ case CHIP_R580:
+ dev_priv->flags |= RADEON_HAS_HIERZ;
+ break;
+ default:
+ /* all other chips have no hierarchical z buffer */
+ break;
+ }
+
+ pci_set_master(dev->pdev);
+
+ if (drm_pci_device_is_agp(dev))
+ dev_priv->flags |= RADEON_IS_AGP;
+ else if (pci_is_pcie(dev->pdev))
+ dev_priv->flags |= RADEON_IS_PCIE;
+ else
+ dev_priv->flags |= RADEON_IS_PCI;
+
+ ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
+ pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
+ _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+ if (ret != 0)
+ return ret;
+
+ ret = drm_vblank_init(dev, 2);
+ if (ret) {
+ radeon_driver_unload(dev);
+ return ret;
+ }
+
+ DRM_DEBUG("%s card detected\n",
+ ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
+ return ret;
+}
+
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv;
+ unsigned long sareapage;
+ int ret;
+
+ master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
+ if (!master_priv)
+ return -ENOMEM;
+
+ /* prebuild the SAREA */
+ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+ &master_priv->sarea);
+ if (ret) {
+ DRM_ERROR("SAREA setup failed\n");
+ kfree(master_priv);
+ return ret;
+ }
+ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+ master_priv->sarea_priv->pfCurrentPage = 0;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ if (master_priv->sarea_priv &&
+ master_priv->sarea_priv->pfCurrentPage != 0)
+ radeon_cp_dispatch_flip(dev, master);
+
+ master_priv->sarea_priv = NULL;
+ if (master_priv->sarea)
+ drm_rmmap_locked(dev, master_priv->sarea);
+
+ kfree(master_priv);
+
+ master->driver_priv = NULL;
+}
+
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
+{
+ int ret;
+ drm_local_map_t *map;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+
+ dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
+ ret = drm_addmap(dev, dev_priv->fb_aper_offset,
+ pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
+ _DRM_WRITE_COMBINING, &map);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+int radeon_driver_unload(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ drm_rmmap(dev, dev_priv->mmio);
+
+ kfree(dev_priv);
+
+ dev->dev_private = NULL;
+ return 0;
+}
+
+void radeon_commit_ring(drm_radeon_private_t *dev_priv)
+{
+ int i;
+ u32 *ring;
+ int tail_aligned;
+
+ /* pad the ring out to a 16-dword boundary if the tail is unaligned */
+
+ tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
+ if (tail_aligned) {
+ int num_p2 = RADEON_RING_ALIGN - tail_aligned;
+
+ ring = dev_priv->ring.start;
+ /* pad with some CP_PACKET2 */
+ for (i = 0; i < num_p2; i++)
+ ring[dev_priv->ring.tail + i] = CP_PACKET2();
+
+ dev_priv->ring.tail += i;
+
+ dev_priv->ring.space -= num_p2 * sizeof(u32);
+ }
+
+ dev_priv->ring.tail &= dev_priv->ring.tail_mask;
+
+ DRM_MEMORYBARRIER();
+ GET_RING_HEAD( dev_priv );
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+ RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail);
+ /* read from PCI bus to ensure correct posting */
+ RADEON_READ(R600_CP_RB_RPTR);
+ } else {
+ RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
+ /* read from PCI bus to ensure correct posting */
+ RADEON_READ(RADEON_CP_RB_RPTR);
+ }
+}
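+
+/* Editor's note: the padding in radeon_commit_ring() rounds the write
+ * pointer up to the next RADEON_RING_ALIGN (16-dword) boundary using
+ * type-2 NOP packets. Example, assuming tail = 0x105:
+ *
+ *	tail & 15 = 5  ->  emit 16 - 5 = 11 CP_PACKET2()  ->  tail = 0x110
+ *
+ * The register read that follows the WPTR write forces PCI posting, so
+ * the GPU observes the updated write pointer before the ioctl returns.
+ */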
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 0000000..fe36f1d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+ struct drm_device *ddev = p->rdev->ddev;
+ struct radeon_cs_chunk *chunk;
+ unsigned i, j;
+ bool duplicate;
+
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ chunk = &p->chunks[p->chunk_relocs_idx];
+ p->dma_reloc_idx = 0;
+ /* FIXME: we assume that each reloc uses 4 dwords */
+ p->nrelocs = chunk->length_dw / 4;
+ p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
+ if (p->relocs_ptr == NULL) {
+ return -ENOMEM;
+ }
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ for (i = 0; i < p->nrelocs; i++) {
+ struct drm_radeon_cs_reloc *r;
+
+ duplicate = false;
+ r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+ for (j = 0; j < i; j++) {
+ if (r->handle == p->relocs[j].handle) {
+ p->relocs_ptr[i] = &p->relocs[j];
+ duplicate = true;
+ break;
+ }
+ }
+ if (duplicate) {
+ p->relocs[i].handle = 0;
+ continue;
+ }
+
+ p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
+ r->handle);
+ if (p->relocs[i].gobj == NULL) {
+ DRM_ERROR("gem object lookup failed 0x%x\n",
+ r->handle);
+ return -ENOENT;
+ }
+ p->relocs_ptr[i] = &p->relocs[i];
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
+ p->relocs[i].lobj.written = !!r->write_domain;
+
+ /* the first reloc of a UVD job is the msg and that must be in
+ VRAM; also put everything into VRAM on AGP cards to avoid
+ image corruption */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+ (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+ /* TODO: is this still needed for NI+ ? */
+ p->relocs[i].lobj.domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ p->relocs[i].lobj.alt_domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ } else {
+ uint32_t domain = r->write_domain ?
+ r->write_domain : r->read_domains;
+
+ p->relocs[i].lobj.domain = domain;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ p->relocs[i].lobj.alt_domain = domain;
+ }
+
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].handle = r->handle;
+
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
+ }
+ return radeon_bo_list_validate(&p->validated, p->ring);
+}
+
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+ p->priority = priority;
+
+ switch (ring) {
+ default:
+ DRM_ERROR("unknown ring id: %d\n", ring);
+ return -EINVAL;
+ case RADEON_CS_RING_GFX:
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ case RADEON_CS_RING_COMPUTE:
+ if (p->rdev->family >= CHIP_TAHITI) {
+ if (p->priority > 0)
+ p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
+ } else
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ case RADEON_CS_RING_DMA:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->priority > 0)
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ else
+ p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+ } else if (p->rdev->family >= CHIP_R600) {
+ p->ring = R600_RING_TYPE_DMA_INDEX;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_CS_RING_UVD:
+ p->ring = R600_RING_TYPE_UVD_INDEX;
+ break;
+ }
+ return 0;
+}
+
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+ int i;
+
+ for (i = 0; i < p->nrelocs; i++) {
+ if (!p->relocs[i].robj)
+ continue;
+
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+ }
+}
+
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+ struct drm_radeon_cs *cs = data;
+ uint64_t *chunk_array_ptr;
+ unsigned size, i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
+
+ if (!cs->num_chunks) {
+ return 0;
+ }
+ /* get chunks */
+ INIT_LIST_HEAD(&p->validated);
+ p->idx = 0;
+ p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
+ p->const_ib.sa_bo = NULL;
+ p->const_ib.semaphore = NULL;
+ p->chunk_ib_idx = -1;
+ p->chunk_relocs_idx = -1;
+ p->chunk_flags_idx = -1;
+ p->chunk_const_ib_idx = -1;
+ p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ if (p->chunks_array == NULL) {
+ return -ENOMEM;
+ }
+ chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+ if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+ sizeof(uint64_t)*cs->num_chunks)) {
+ return -EFAULT;
+ }
+ p->cs_flags = 0;
+ p->nchunks = cs->num_chunks;
+ p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+ if (p->chunks == NULL) {
+ return -ENOMEM;
+ }
+ for (i = 0; i < p->nchunks; i++) {
+ struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+ struct drm_radeon_cs_chunk user_chunk;
+ uint32_t __user *cdata;
+
+ chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
+ if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+ sizeof(struct drm_radeon_cs_chunk))) {
+ return -EFAULT;
+ }
+ p->chunks[i].length_dw = user_chunk.length_dw;
+ p->chunks[i].kdata = NULL;
+ p->chunks[i].chunk_id = user_chunk.chunk_id;
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs_idx = i;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib_idx = i;
+ /* zero length IB isn't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+ p->chunk_const_ib_idx = i;
+ /* zero length CONST IB isn't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags_idx = i;
+ /* zero length flags aren't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
+ }
+
+ cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+ if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+ (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+ size = p->chunks[i].length_dw * sizeof(uint32_t);
+ p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
+ if (p->chunks[i].kdata == NULL) {
+ return -ENOMEM;
+ }
+ if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
+ p->chunks[i].user_ptr, size)) {
+ return -EFAULT;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
+ }
+ }
+ }
+
+ /* these are KMS only */
+ if (p->rdev) {
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ return -EINVAL;
+ }
+
+ if (radeon_cs_get_ring(p, ring, priority))
+ return -EINVAL;
+
+ /* we only support VM on some SI+ rings */
+ if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
+ return -EINVAL;
+ }
+ }
+
+ /* deal with non-vm */
+ if ((p->chunk_ib_idx != -1) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+ (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
+ return -ENOMEM;
+ }
+ }
+ p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+ p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+ p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+ p->chunks[p->chunk_ib_idx].last_page_index =
+ ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+ }
+
+ return 0;
+}
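+
+/* Editor's note: the CS ioctl payload parsed above is doubly indirect:
+ * userspace passes an array of 64-bit pointers, each naming one
+ * drm_radeon_cs_chunk which is then copied in separately. In sketch
+ * form:
+ *
+ *	u64 ptrs[num_chunks];                  // from cs->chunks
+ *	for (i = 0; i < num_chunks; i++) {
+ *		struct drm_radeon_cs_chunk c;
+ *		copy_from_user(&c, (void __user *)(unsigned long)ptrs[i],
+ *			       sizeof(c));
+ *	}
+ *
+ * Only RELOCS and FLAGS chunks are pulled into kernel memory up front;
+ * IB chunk contents stay in user memory and are paged in on demand.
+ */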
+
+/**
+ * radeon_cs_parser_fini() - clean parser states
+ * @parser: parser structure holding parsing context.
+ * @error: error number
+ *
+ * If error is set, back off the buffer reservations; otherwise fence
+ * the validated buffers. Either way, free the memory used by the
+ * parsing context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+ unsigned i;
+
+ if (!error) {
+ ttm_eu_fence_buffer_objects(&parser->validated,
+ parser->ib.fence);
+ } else {
+ ttm_eu_backoff_reservation(&parser->validated);
+ }
+
+ if (parser->relocs != NULL) {
+ for (i = 0; i < parser->nrelocs; i++) {
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ }
+ }
+ kfree(parser->track);
+ kfree(parser->relocs);
+ kfree(parser->relocs_ptr);
+ for (i = 0; i < parser->nchunks; i++) {
+ kfree(parser->chunks[i].kdata);
+ if ((parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
+ }
+ kfree(parser->chunks);
+ kfree(parser->chunks_array);
+ radeon_ib_free(parser->rdev, &parser->ib);
+ radeon_ib_free(parser->rdev, &parser->const_ib);
+}
+
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ /* Copy the packet into the IB, the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached).
+ */
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ NULL, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
+ r = radeon_cs_parse(rdev, parser->ring, parser);
+ if (r || parser->parser_error) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_finish_pages(parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ radeon_cs_sync_rings(parser);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ if (r) {
+ DRM_ERROR("Failed to schedule IB !\n");
+ }
+ return r;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+ struct radeon_vm *vm)
+{
+ struct radeon_device *rdev = parser->rdev;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ int r;
+
+ r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ if (r) {
+ return r;
+ }
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ bo = lobj->bo;
+ r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+ return 0;
+
+ if ((rdev->family >= CHIP_TAHITI) &&
+ (parser->chunk_const_ib_idx != -1)) {
+ ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get const ib !\n");
+ return r;
+ }
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
+ if (r) {
+ return r;
+ }
+ }
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
+ if (r) {
+ return r;
+ }
+
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&vm->mutex);
+ r = radeon_vm_alloc_pt(rdev, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_bo_vm_update_pte(parser, vm);
+ if (r) {
+ goto out;
+ }
+ radeon_cs_sync_rings(parser);
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
+
+ if ((rdev->family >= CHIP_TAHITI) &&
+ (parser->chunk_const_ib_idx != -1)) {
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ } else {
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ }
+
+ if (!r) {
+ radeon_vm_fence(rdev, vm, parser->ib.fence);
+ }
+
+out:
+ radeon_vm_add_to_lru(rdev, vm);
+ mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
+ return r;
+}
+
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ }
+ return r;
+}
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_cs_parser parser;
+ int r;
+
+ down_read(&rdev->exclusive_lock);
+ if (!rdev->accel_working) {
+ up_read(&rdev->exclusive_lock);
+ return -EBUSY;
+ }
+ /* initialize parser */
+ memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ parser.filp = filp;
+ parser.rdev = rdev;
+ parser.dev = rdev->dev;
+ parser.family = rdev->family;
+ r = radeon_cs_parser_init(&parser, data);
+ if (r) {
+ DRM_ERROR("Failed to initialize parser !\n");
+ radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
+ r = radeon_cs_handle_lockup(rdev, r);
+ return r;
+ }
+ r = radeon_cs_parser_relocs(&parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
+ radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
+ r = radeon_cs_handle_lockup(rdev, r);
+ return r;
+ }
+
+ if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
+ r = radeon_cs_ib_chunk(rdev, &parser);
+ if (r) {
+ goto out;
+ }
+ r = radeon_cs_ib_vm_chunk(rdev, &parser);
+ if (r) {
+ goto out;
+ }
+out:
+ radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
+ r = radeon_cs_handle_lockup(rdev, r);
+ return r;
+}
+
+int radeon_cs_finish_pages(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ int i;
+ int size = PAGE_SIZE;
+
+ for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
+ if (i == ibc->last_page_index) {
+ size = (ibc->length_dw * 4) % PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
+ }
+
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+ ibc->user_ptr + (i * PAGE_SIZE),
+ size))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+{
+ int new_page;
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ int i;
+ int size = PAGE_SIZE;
+ bool copy1 = !(p->rdev && (p->rdev->flags & RADEON_IS_AGP));
+
+ for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+ ibc->user_ptr + (i * PAGE_SIZE),
+ PAGE_SIZE)) {
+ p->parser_error = -EFAULT;
+ return 0;
+ }
+ }
+
+ if (pg_idx == ibc->last_page_index) {
+ size = (ibc->length_dw * 4) % PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
+ }
+
+ new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+ if (copy1)
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
+ if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
+ ibc->user_ptr + (pg_idx * PAGE_SIZE),
+ size)) {
+ p->parser_error = -EFAULT;
+ return 0;
+ }
+
+ /* AGP path: the data was staged in kpage, copy it on into the IB */
+ if (!copy1)
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+
+ ibc->last_copied_page = pg_idx;
+ ibc->kpage_idx[new_page] = pg_idx;
+
+ return new_page;
+}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
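+
+/* Editor's note: kpage[0..1] above form a two-entry cache over the
+ * user-space IB. A lookup hits when either slot already holds the
+ * wanted page; a miss evicts the slot holding the lower page index,
+ * which approximates LRU since IBs are read mostly in order:
+ *
+ *	if (kpage_idx[0] == pg) return kpage[0][off];    // hit
+ *	if (kpage_idx[1] == pg) return kpage[1][off];    // hit
+ *	victim = (kpage_idx[0] < kpage_idx[1]) ? 0 : 1;  // evict older
+ */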
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ * @idx: index of the packet header within the IB
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining IB size or if the packet type is
+ * unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
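+
+/* Editor's note: a CP packet header packs everything extracted above
+ * into one dword -- the packet type in bits 31:30, the body dword
+ * count (minus one) in bits 29:16, and a type-specific field (register
+ * base for type 0, opcode for type 3) in the low bits, so the GET_*
+ * macros reduce to shifts and masks:
+ *
+ *	type  = (header >> 30) & 0x3;
+ *	count = (header >> 16) & 0x3fff;
+ *
+ * The bounds check uses count + 1 because of the minus-one encoding.
+ */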
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check whether the next packet is a relocation packet3 NOP.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: where to store the resulting reloc information
+ * @nomm: set when there is no memory manager (legacy UMS path)
+ *
+ * Check if the next packet is a relocation packet3, do bo validation
+ * and compute the GPU offset using the provided start.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
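+
+/* Editor's note: in the nomm (legacy) branch above, the 64-bit GPU
+ * offset is reassembled from two dwords of the 4-dword reloc record --
+ * dword 0 carries the low half and dword 3 the high half:
+ *
+ *	offset = ((u64)kdata[idx + 3] << 32) | kdata[idx + 0];
+ */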
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 0000000..b097d5b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ uint32_t cur_lock;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+ WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else {
+ cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= RADEON_CUR_LOCK;
+ else
+ cur_lock &= ~RADEON_CUR_LOCK;
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+ }
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ } else {
+ u32 reg;
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ reg = RADEON_CRTC_GEN_CNTL;
+ break;
+ case 1:
+ reg = RADEON_CRTC2_GEN_CNTL;
+ break;
+ default:
+ return;
+ }
+ WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
+ }
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ } else {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+ break;
+ case 1:
+ WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+ break;
+ default:
+ return;
+ }
+
+ WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+ (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+ ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+ }
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+ uint64_t gpu_addr)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ }
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
+ } else {
+ radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+ }
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct radeon_bo *robj;
+ uint64_t gpu_addr;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ radeon_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ robj = gem_to_radeon_bo(obj);
+ ret = radeon_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &gpu_addr);
+ radeon_bo_unreserve(robj);
+ if (ret)
+ goto fail;
+
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+
+ radeon_lock_cursor(crtc, true);
+ radeon_set_cursor(crtc, obj, gpu_addr);
+ radeon_show_cursor(crtc);
+ radeon_lock_cursor(crtc, false);
+
+unpin:
+ if (radeon_crtc->cursor_bo) {
+ robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ ret = radeon_bo_reserve(robj, false);
+ if (likely(ret == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ }
+
+ radeon_crtc->cursor_bo = obj;
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ int xorigin = 0, yorigin = 0;
+ int w = radeon_crtc->cursor_width;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ }
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ if (x < 0) {
+ xorigin = min(-x, CURSOR_WIDTH - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, CURSOR_HEIGHT - 1);
+ y = 0;
+ }
+
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+ int i = 0;
+ struct drm_crtc *crtc_p;
+
+ /*
+ * avivo cursor image can't end on 128 pixel boundary or
+ * go past the end of the frame if both crtcs are enabled
+ *
+ * NOTE: It is safe to access crtc->enabled of other crtcs
+ * without holding either the mode_config lock or the other
+ * crtc's lock as long as write access to this flag _always_
+ * grabs all locks.
+ */
+ list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+ if (crtc_p->enabled)
+ i++;
+ }
+ if (i > 1) {
+ int cursor_end, frame_end;
+
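+ /* clip the cursor width to the frame, then shave one more pixel
+ * off if the cursor's right edge would land exactly on a
+ * 128-pixel boundary */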
+ cursor_end = x - xorigin + w;
+ frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+ if (cursor_end >= frame_end) {
+ w = w - (cursor_end - frame_end);
+ if (!(frame_end & 0x7f))
+ w--;
+ } else {
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+ if (w <= 0) {
+ w = 1;
+ cursor_end = x - xorigin + w;
+ if (!(cursor_end & 0x7f)) {
+ x--;
+ WARN_ON_ONCE(x < 0);
+ }
+ }
+ }
+ }
+
+ radeon_lock_cursor(crtc, true);
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else {
+ if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ y *= 2;
+
+ WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | (xorigin << 16)
+ | yorigin));
+ WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | (x << 16)
+ | y));
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+ (yorigin * 256)));
+ }
+ radeon_lock_cursor(crtc, false);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 0000000..8df1525
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,1549 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/console.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+static const char radeon_family_name[][16] = {
+ "R100",
+ "RV100",
+ "RS100",
+ "RV200",
+ "RS200",
+ "R200",
+ "RV250",
+ "RS300",
+ "RV280",
+ "R300",
+ "R350",
+ "RV350",
+ "RV380",
+ "R420",
+ "R423",
+ "RV410",
+ "RS400",
+ "RS480",
+ "RS600",
+ "RS690",
+ "RS740",
+ "RV515",
+ "R520",
+ "RV530",
+ "RV560",
+ "RV570",
+ "R580",
+ "R600",
+ "RV610",
+ "RV630",
+ "RV670",
+ "RV620",
+ "RV635",
+ "RS780",
+ "RS880",
+ "RV770",
+ "RV730",
+ "RV710",
+ "RV740",
+ "CEDAR",
+ "REDWOOD",
+ "JUNIPER",
+ "CYPRESS",
+ "HEMLOCK",
+ "PALM",
+ "SUMO",
+ "SUMO2",
+ "BARTS",
+ "TURKS",
+ "CAICOS",
+ "CAYMAN",
+ "ARUBA",
+ "TAHITI",
+ "PITCAIRN",
+ "VERDE",
+ "OLAND",
+ "HAINAN",
+ "LAST",
+};
+
+/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size)
+{
+ u32 tmp, reg, and_mask, or_mask;
+ int i;
+
+ if (array_size % 3)
+ return;
+
+ for (i = 0; i < array_size; i += 3) {
+ reg = registers[i + 0];
+ and_mask = registers[i + 1];
+ or_mask = registers[i + 2];
+
+ if (and_mask == 0xffffffff) {
+ tmp = or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~and_mask;
+ tmp |= or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+}
+
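+/*
+ * Usage sketch (hypothetical values, not a real golden-register list):
+ * the array is consumed as { reg, and_mask, or_mask } triples, so its
+ * length must be a multiple of 3. An and_mask of 0xffffffff overwrites
+ * the whole register; anything else is a read-modify-write where the
+ * and_mask bits are cleared before the or_mask bits are set.
+ *
+ *   static const u32 example_golden_regs[] = {
+ *           0x9a10, 0xffffffff, 0x00018208,
+ *           0x3f90, 0x0000ffff, 0xff000000,
+ *   };
+ *   radeon_program_register_sequence(rdev, example_golden_regs,
+ *                                    (u32)ARRAY_SIZE(example_golden_regs));
+ */
+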
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
+ */
+void radeon_surface_init(struct radeon_device *rdev)
+{
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R600) {
+ int i;
+
+ for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+ if (rdev->surface_regs[i].bo)
+ radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+ else
+ radeon_clear_surface_reg(rdev, i);
+ }
+ /* enable surfaces */
+ WREG32(RADEON_SURFACE_CNTL, 0);
+ }
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
+void radeon_scratch_init(struct radeon_device *rdev)
+{
+ int i;
+
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R300) {
+ rdev->scratch.num_reg = 5;
+ } else {
+ rdev->scratch.num_reg = 7;
+ }
+ rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ rdev->scratch.free[i] = true;
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+ }
+}
+
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+ int i;
+
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ if (rdev->scratch.free[i]) {
+ rdev->scratch.free[i] = false;
+ *reg = rdev->scratch.reg[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ if (rdev->scratch.reg[i] == reg) {
+ rdev->scratch.free[i] = true;
+ return;
+ }
+ }
+}
+
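+/*
+ * Typical (illustrative) use of the scratch allocator, as ring tests do:
+ * grab a register, seed it from the CPU, let the CP overwrite it, and
+ * hand it back when done.
+ *
+ *   uint32_t scratch;
+ *
+ *   if (radeon_scratch_get(rdev, &scratch) == 0) {
+ *           WREG32(scratch, 0xCAFEDEAD);
+ *           ... submit work that writes scratch ...
+ *           radeon_scratch_free(rdev, scratch);
+ *   }
+ */
+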
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
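+
+/*
+ * Consumer sketch (illustrative): rdev->wb.wb is the CPU mapping of the
+ * writeback page, indexed in dwords, so a ring read pointer the GPU
+ * mirrors there can be fetched without a slow MMIO read:
+ *
+ *   rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
+ */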
+
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics). Used for suspend.
+ */
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+ rdev->wb.enabled = false;
+}
+
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+ radeon_wb_disable(rdev);
+ if (rdev->wb.wb_obj) {
+ if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ }
+ radeon_bo_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes writeback and allocates writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or a negative error code on failure.
+ */
+int radeon_wb_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->wb.wb_obj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ }
+
+ /* clear wb memory */
+ memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
+ /* disable event_write fences */
+ rdev->wb.use_event = false;
+ /* disabled via module param */
+ if (radeon_no_wb == 1) {
+ rdev->wb.enabled = false;
+ } else {
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* often unreliable on AGP */
+ rdev->wb.enabled = false;
+ } else if (rdev->family < CHIP_R300) {
+ /* often unreliable on pre-r300 */
+ rdev->wb.enabled = false;
+ } else {
+ rdev->wb.enabled = true;
+ /* event_write fences are only available on r600+ */
+ if (rdev->family >= CHIP_R600) {
+ rdev->wb.use_event = true;
+ }
+ }
+ }
+ /* always use writeback/events on NI, APUs */
+ if (rdev->family >= CHIP_PALM) {
+ rdev->wb.enabled = true;
+ rdev->wb.use_event = true;
+ }
+
+ dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+ return 0;
+}
+
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as parameter (which is so far either PCI aperture address or
+ * for IGP TOM base address).
+ *
+ * If there is not enough space to fit the non-visible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and if the AGP aperture doesn't allow us to have
+ * room for all the VRAM then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ *
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size since on some boards we need to program the MC
+ * to cover the whole aperture even if the VRAM size is smaller than the
+ * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hardware of Novell bug 204882, along with
+ * lots of Ubuntu ones).
+ *
+ * Note: IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that, though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+ uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
+ mc->vram_start = base;
+ if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (limit && limit < mc->real_vram_size)
+ mc->real_vram_size = limit;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to place GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_af, size_bf;
+
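+ /* size_af: room left above the end of VRAM; size_bf: room below the
+ * start of VRAM (both honoring the GTT base alignment) */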
+ size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_bf = mc->vram_start & ~mc->gtt_base_align;
+ if (size_bf > size_af) {
+ if (mc->gtt_size > size_bf) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_bf;
+ }
+ mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+ } else {
+ if (mc->gtt_size > size_af) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_af;
+ }
+ mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+ }
+ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+}
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
+bool radeon_card_posted(struct radeon_device *rdev)
+{
+ uint32_t reg;
+
+ /* required for EFI mode on macbook2,1 which uses an r5xx asic */
+ if (efi_enabled(EFI_BOOT) &&
+ (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+ (rdev->family < CHIP_R600))
+ return false;
+
+ if (ASIC_IS_NODCE(rdev))
+ goto check_memsize;
+
+ /* first check CRTCs */
+ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+ RREG32(AVIVO_D2CRTC_CONTROL);
+ if (reg & AVIVO_CRTC_EN) {
+ return true;
+ }
+ } else {
+ reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+ RREG32(RADEON_CRTC2_GEN_CNTL);
+ if (reg & RADEON_CRTC_EN) {
+ return true;
+ }
+ }
+
+check_memsize:
+ /* then check MEM_SIZE, in case the crtcs are off */
+ if (rdev->family >= CHIP_R600)
+ reg = RREG32(R600_CONFIG_MEMSIZE);
+ else
+ reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+ if (reg)
+ return true;
+
+ return false;
+}
+
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * The params are used to calculate display watermarks (all asics).
+ */
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+ fixed20_12 a;
+ u32 sclk = rdev->pm.current_sclk;
+ u32 mclk = rdev->pm.current_mclk;
+
+ /* sclk/mclk in MHz */
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ a.full = dfixed_const(16);
+ /* core_bandwidth = sclk(MHz) * 16 */
+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+ }
+}
+
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+ if (radeon_card_posted(rdev))
+ return true;
+
+ if (rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios)
+ atom_asic_init(rdev->mode_info.atom_context);
+ else
+ radeon_combios_asic_init(rdev->ddev);
+ return true;
+ } else {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return false;
+ }
+}
+
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_dummy_page_init(struct radeon_device *rdev)
+{
+ if (rdev->dummy_page.page)
+ return 0;
+ rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+ if (rdev->dummy_page.page == NULL)
+ return -ENOMEM;
+ rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
+ dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ __free_page(rdev->dummy_page.page);
+ rdev->dummy_page.page = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+void radeon_dummy_page_fini(struct radeon_device *rdev)
+{
+ if (rdev->dummy_page.page == NULL)
+ return;
+ pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(rdev->dummy_page.page);
+ rdev->dummy_page.page = NULL;
+}
+
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.). See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
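+
+/*
+ * A typical call into the interpreter looks roughly like this (sketch;
+ * the table index macro and the argument struct come from atombios.h):
+ *
+ *   int index = GetIndexIntoMasterTable(COMMAND, AsicInit);
+ *
+ *   atom_execute_table(rdev->mode_info.atom_context, index,
+ *                      (uint32_t *)&args);
+ */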
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = rdev->pll_rreg(rdev, reg);
+ return r;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ rdev->pll_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = rdev->mc_rreg(rdev, reg);
+ return r;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ rdev->mc_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
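+ /* atom bytecode register offsets are in dwords, while the MMIO
+ * accessors take byte offsets, hence the *4 */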
+ WREG32(reg*4, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg*4);
+ return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ WREG32_IO(reg*4, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg*4);
+ return r;
+}
+
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ rdev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = rdev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (rdev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ if (!rdev->mode_info.atom_context) {
+ radeon_atombios_fini(rdev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&rdev->mode_info.atom_context->mutex);
+ radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ atom_allocate_fb_scratch(rdev->mode_info.atom_context);
+ return 0;
+}
+
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+ if (rdev->mode_info.atom_context) {
+ kfree(rdev->mode_info.atom_context->scratch);
+ }
+ kfree(rdev->mode_info.atom_context);
+ rdev->mode_info.atom_context = NULL;
+ kfree(rdev->mode_info.atom_card_info);
+ rdev->mode_info.atom_card_info = NULL;
+}
+
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser. See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
+int radeon_combios_init(struct radeon_device *rdev)
+{
+ radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+ return 0;
+}
+
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+static unsigned int radeon_vga_set_decode(void *cookie, bool state)
+{
+ struct radeon_device *rdev = cookie;
+ radeon_vga_set_state(rdev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
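+ /* a power of two has exactly one bit set, so subtracting one flips all
+ * lower bits and the AND comes out zero (note this also accepts 0,
+ * which is fine for the "no limit" defaults used here)
+ */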
+ return (arg & (arg - 1)) == 0;
+}
+
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+static void radeon_check_arguments(struct radeon_device *rdev)
+{
+ /* vramlimit must be a power of two */
+ if (!radeon_check_pot_argument(radeon_vram_limit)) {
+ dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+ radeon_vram_limit);
+ radeon_vram_limit = 0;
+ }
+
+ /* gtt size must be power of two and greater or equal to 32M */
+ if (radeon_gart_size < 32) {
+ dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+
+ } else if (!radeon_check_pot_argument(radeon_gart_size)) {
+ dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ }
+ rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
+ /* AGP mode can only be -1, 1, 2, 4, 8 */
+ switch (radeon_agpmode) {
+ case -1:
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+ "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+ radeon_agpmode = 0;
+ break;
+ }
+}
+
+/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver. Suspends or resumes the
+ * asic before or after it is powered up using ACPI methods.
+ */
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ unsigned d3_delay = dev->pdev->d3_delay;
+
+ printk(KERN_INFO "radeon: switched on\n");
+ /* don't suspend or resume card normally */
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ dev->pdev->d3_delay = 20;
+
+ radeon_resume_kms(dev);
+
+ dev->pdev->d3_delay = d3_delay;
+
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ drm_kms_helper_poll_enable(dev);
+ } else {
+ printk(KERN_INFO "radeon: switched off\n");
+ drm_kms_helper_poll_disable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ radeon_suspend_kms(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver. Checks if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+ .set_gpu_state = radeon_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = radeon_switcheroo_can_switch,
+};
+
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags)
+{
+ int r, i;
+ int dma_bits;
+
+ rdev->shutdown = false;
+ rdev->dev = &pdev->dev;
+ rdev->ddev = ddev;
+ rdev->pdev = pdev;
+ rdev->flags = flags;
+ rdev->family = flags & RADEON_FAMILY_MASK;
+ rdev->is_atom_bios = false;
+ rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->accel_working = false;
+ /* set up ring ids */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ rdev->ring[i].idx = i;
+ }
+
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ /* mutex initialization is all done here so we
+ * can recall functions without having locking issues */
+ mutex_init(&rdev->ring_lock);
+ mutex_init(&rdev->dc_hw_i2c_mutex);
+ atomic_set(&rdev->ih.lock, 0);
+ mutex_init(&rdev->gem.mutex);
+ mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->gpu_clock_mutex);
+ init_rwsem(&rdev->pm.mclk_lock);
+ init_rwsem(&rdev->exclusive_lock);
+ init_waitqueue_head(&rdev->irq.vblank_queue);
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* initialize vm here */
+ mutex_init(&rdev->vm_manager.lock);
+ /* Adjust VM size here.
+ * Currently set to 4GB ((1 << 20) 4k pages).
+ * Max GPUVM size for cayman and SI is 40 bits.
+ */
+ rdev->vm_manager.max_pfn = 1 << 20;
+ INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
+
+ /* Set asic functions */
+ r = radeon_asic_init(rdev);
+ if (r)
+ return r;
+ radeon_check_arguments(rdev);
+
+ /* all of the newer IGP chips have an internal gart.
+ * However, some rs4xx report as AGP, so remove that flag here.
+ */
+ if ((rdev->family >= CHIP_RS400) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ rdev->flags &= ~RADEON_IS_AGP;
+ }
+
+ if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+ radeon_agp_disable(rdev);
+ }
+
+ /* Set the internal MC address mask
+ * This is the max address of the GPU's
+ * internal address space.
+ */
+ if (rdev->family >= CHIP_CAYMAN)
+ rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+ else if (rdev->family >= CHIP_CEDAR)
+ rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+ else
+ rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
+
+ /* set DMA mask + need_dma32 flags.
+ * PCIE - can handle 40-bits.
+ * IGP - can handle 40-bits
+ * AGP - generally dma32 is safest
+ * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+ */
+ rdev->need_dma32 = false;
+ if (rdev->flags & RADEON_IS_AGP)
+ rdev->need_dma32 = true;
+ if ((rdev->flags & RADEON_IS_PCI) &&
+ (rdev->family <= CHIP_RS740))
+ rdev->need_dma32 = true;
+
+ dma_bits = rdev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ rdev->need_dma32 = true;
+ dma_bits = 32;
+ printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+ }
+ r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+ printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+ }
+
+ /* Registers mapping */
+ /* TODO: block userspace mapping of io register */
+ spin_lock_init(&rdev->mmio_idx_lock);
+ rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+ rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
+ rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
+ if (rdev->rmmio == NULL) {
+ return -ENOMEM;
+ }
+ DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
+ DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+
+ /* io port mapping */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
+ rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
+ rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
+ break;
+ }
+ }
+ if (rdev->rio_mem == NULL)
+ DRM_ERROR("Unable to find PCI I/O BAR\n");
+
+ /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+ /* this will fail for cards that aren't VGA class devices, just
+ * ignore it */
+ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
+
+ r = radeon_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
+ r = radeon_gem_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ }
+
+ if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+ /* Acceleration is not working on the AGP card; try again
+ * with a fallback to PCI or PCIE GART
+ */
+ radeon_asic_reset(rdev);
+ radeon_fini(rdev);
+ radeon_agp_disable(rdev);
+ r = radeon_init(rdev);
+ if (r)
+ return r;
+ }
+ if ((radeon_testing & 1)) {
+ if (rdev->accel_working)
+ radeon_test_moves(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
+ }
+ if ((radeon_testing & 2)) {
+ if (rdev->accel_working)
+ radeon_test_syncing(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
+ }
+ if (radeon_benchmarking) {
+ if (rdev->accel_working)
+ radeon_benchmark(rdev, radeon_benchmarking);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
+ }
+ return 0;
+}
+
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
+void radeon_device_fini(struct radeon_device *rdev)
+{
+ DRM_INFO("radeon: finishing device.\n");
+ rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
+ radeon_fini(rdev);
+ vga_switcheroo_unregister_client(rdev->pdev);
+ vga_client_register(rdev->pdev, NULL, NULL, NULL);
+ if (rdev->rio_mem)
+ pci_iounmap(rdev->pdev, rdev->rio_mem);
+ rdev->rio_mem = NULL;
+ iounmap(rdev->rmmio);
+ rdev->rmmio = NULL;
+ radeon_debugfs_remove_files(rdev);
+}
+
+
+/*
+ * Suspend & resume.
+ */
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @state: suspend state
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+{
+ struct radeon_device *rdev;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ int i, r;
+ bool force_completion = false;
+
+ if (dev == NULL || dev->dev_private == NULL) {
+ return -ENODEV;
+ }
+ if (state.event == PM_EVENT_PRETHAW) {
+ return 0;
+ }
+ rdev = dev->dev_private;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ drm_kms_helper_poll_disable(dev);
+
+ /* turn off display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+
+ /* unpin the front buffers */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+ struct radeon_bo *robj;
+
+ if (rfb == NULL || rfb->obj == NULL) {
+ continue;
+ }
+ robj = gem_to_radeon_bo(rfb->obj);
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+ r = radeon_bo_reserve(robj, false);
+ if (r == 0) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
+ }
+ }
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
+
+ mutex_lock(&rdev->ring_lock);
+ /* wait for gpu to finish processing current batch */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* delay GPU reset to resume */
+ force_completion = true;
+ }
+ }
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
+ mutex_unlock(&rdev->ring_lock);
+
+ radeon_save_bios_scratch_regs(rdev);
+
+ radeon_pm_suspend(rdev);
+ radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
+ /* evict remaining vram memory */
+ radeon_bo_evict_vram(rdev);
+
+ radeon_agp_suspend(rdev);
+
+ pci_save_state(dev->pdev);
+ if (state.event == PM_EVENT_SUSPEND) {
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3hot);
+ }
+ console_lock();
+ radeon_fbdev_set_suspend(rdev, 1);
+ console_unlock();
+ return 0;
+}
+
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int radeon_resume_kms(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct radeon_device *rdev = dev->dev_private;
+ int r;
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ console_lock();
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev)) {
+ console_unlock();
+ return -1;
+ }
+ /* resume AGP if in use */
+ radeon_agp_resume(rdev);
+ radeon_resume(rdev);
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
+ radeon_pm_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
+
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
+ radeon_atom_encoder_init(rdev);
+ radeon_atom_disp_eng_pll_init(rdev);
+ /* turn on the BL */
+ if (rdev->mode_info.bl_encoder) {
+ u8 bl_level = radeon_get_backlight_level(rdev,
+ rdev->mode_info.bl_encoder);
+ radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+ bl_level);
+ }
+ }
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
+ /* blat the mode back in */
+ drm_helper_resume_force_mode(dev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ unsigned ring_sizes[RADEON_NUM_RINGS];
+ uint32_t *ring_data[RADEON_NUM_RINGS];
+
+ bool saved = false;
+
+ int i, r;
+ int resched;
+
+ down_write(&rdev->exclusive_lock);
+ radeon_save_bios_scratch_regs(rdev);
+ /* block TTM */
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ radeon_suspend(rdev);
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+ &ring_data[i]);
+ if (ring_sizes[i]) {
+ saved = true;
+ dev_info(rdev->dev, "Saved %d dwords of commands "
+ "on ring %d.\n", ring_sizes[i], i);
+ }
+ }
+
+retry:
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
+ radeon_resume(rdev);
+ }
+
+ radeon_restore_bios_scratch_regs(rdev);
+
+ if (!r) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ radeon_ring_restore(rdev, &rdev->ring[i],
+ ring_sizes[i], ring_data[i]);
+ ring_sizes[i] = 0;
+ ring_data[i] = NULL;
+ }
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r) {
+ dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+ if (saved) {
+ saved = false;
+ radeon_suspend(rdev);
+ goto retry;
+ }
+ }
+ } else {
+ radeon_fence_driver_force_completion(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ kfree(ring_data[i]);
+ }
+ }
+
+ drm_helper_resume_force_mode(rdev->ddev);
+
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+ if (r) {
+ /* bad news, how do we tell userspace? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ }
+
+ up_write(&rdev->exclusive_lock);
+ return r;
+}
+
+
+/*
+ * Debugfs
+ */
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+ struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ if (rdev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = rdev->debugfs_count + 1;
+ if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ rdev->debugfs[rdev->debugfs_count].files = files;
+ rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+ rdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ rdev->ddev->control->debugfs_root,
+ rdev->ddev->control);
+ drm_debugfs_create_files(files, nfiles,
+ rdev->ddev->primary->debugfs_root,
+ rdev->ddev->primary);
+#endif
+ return 0;
+}
+
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->control);
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->primary);
+ }
+#endif
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+void radeon_debugfs_cleanup(struct drm_minor *minor)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 0000000..eb18bb7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,1666 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include <asm/div64.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+ WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+ WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+ WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
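+ /* each LUT entry packs the 10-bit components into one 30-bit word:
+ * red in [29:20], green in [19:10], blue in [9:0] */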
+ for (i = 0; i < 256; i++) {
+ WREG32(AVIVO_DC_LUT_30_COLOR,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+}
+
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
+
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+
+}
+
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+ uint32_t dac2_cntl;
+
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+ if (radeon_crtc->crtc_id == 0)
+ dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+ else
+ dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+ WREG8(RADEON_PALETTE_INDEX, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(RADEON_PALETTE_30_DATA,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (!crtc->enabled)
+ return;
+
+ if (ASIC_IS_DCE5(rdev))
+ dce5_crtc_load_lut(crtc);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_crtc_load_lut(crtc);
+ else if (ASIC_IS_AVIVO(rdev))
+ avivo_crtc_load_lut(crtc);
+ else
+ legacy_crtc_load_lut(crtc);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
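+ /* fbcon hands us 16-bit color components; the hardware LUT entries
+ * are 10 bits wide, hence the >> 6 (mirrored by the << 6 in the
+ * gamma_get counterpart below) */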
+ radeon_crtc->lut_r[regno] = red >> 6;
+ radeon_crtc->lut_g[regno] = green >> 6;
+ radeon_crtc->lut_b[regno] = blue >> 6;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ *red = radeon_crtc->lut_r[regno] << 6;
+ *green = radeon_crtc->lut_g[regno] << 6;
+ *blue = radeon_crtc->lut_b[regno] << 6;
+}
+
+static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size, i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ radeon_crtc->lut_r[i] = red[i] >> 6;
+ radeon_crtc->lut_g[i] = green[i] >> 6;
+ radeon_crtc->lut_b[i] = blue[i] >> 6;
+ }
+ radeon_crtc_load_lut(crtc);
+}
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(radeon_crtc);
+}
+
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+ struct radeon_unpin_work *work =
+ container_of(__work, struct radeon_unpin_work, work);
+ int r;
+
+ /* unpin of the old buffer */
+ r = radeon_bo_reserve(work->old_rbo, false);
+ if (likely(r == 0)) {
+ r = radeon_bo_unpin(work->old_rbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to unpin buffer after flip\n");
+ }
+ radeon_bo_unreserve(work->old_rbo);
+ } else
+ DRM_ERROR("failed to reserve buffer after flip\n");
+
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_unpin_work *work;
+ unsigned long flags;
+ u32 update_pending;
+ int vpos, hpos;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+ (work->fence && !radeon_fence_signaled(work->fence))) {
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+ /* New pageflip, or just completion of a previous one? */
+ if (!radeon_crtc->deferred_flip_completion) {
+ /* do the flip (mmio) */
+ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+ } else {
+ /* This is just a completion of a flip queued in crtc
+ * at last invocation. Make sure we go directly to
+ * completion routine.
+ */
+ update_pending = 0;
+ radeon_crtc->deferred_flip_completion = 0;
+ }
+
+ /* Has the pageflip already completed in crtc, or is it certain
+ * to complete in this vblank?
+ */
+ if (update_pending &&
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ &vpos, &hpos)) &&
+ ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
+ (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+ /* crtc didn't flip in this target vblank interval,
+ * but a flip is pending in the crtc. Based on the current
+ * scanout position we know that the current frame is
+ * (nearly) complete and the flip will (likely)
+ * complete before the start of the next frame.
+ */
+ update_pending = 0;
+ }
+ if (update_pending) {
+ /* crtc didn't flip in this target vblank interval,
+ * but a flip is pending in the crtc. It will complete
+ * in the next vblank interval, so complete the flip at
+ * the next vblank irq.
+ */
+ radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* Pageflip (will be) certainly completed in this vblank. Clean up. */
+ radeon_crtc->unpin_work = NULL;
+
+ /* wakeup userspace */
+ if (work->event)
+ drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+ drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ radeon_fence_unref(&work->fence);
+ radeon_post_page_flip(work->rdev, work->crtc_id);
+ schedule_work(&work->work);
+}
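+
+/* A rough sketch of the flip sequence driven by the handler above (a
+ * summary, not authoritative): the page_flip ioctl queues unpin_work
+ * and arms the vblank irq; on the next vblank this handler programs
+ * the new base via radeon_page_flip(); if the hardware cannot latch
+ * it in time, deferred_flip_completion pushes completion out to the
+ * following vblank irq, where the event is finally delivered.
+ */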
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ struct radeon_unpin_work *work;
+ unsigned long flags;
+ u32 tiling_flags, pitch_pixels;
+ u64 base;
+ int r;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ work->event = event;
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ /* schedule unpin of the old buffer */
+ obj = old_radeon_fb->obj;
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ rbo = gem_to_radeon_bo(obj);
+ work->old_rbo = rbo;
+ obj = new_radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
+ if (rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
+ INIT_WORK(&work->work, radeon_unpin_work_func);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (radeon_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ r = -EBUSY;
+ goto unlock_free;
+ }
+ radeon_crtc->unpin_work = work;
+ radeon_crtc->deferred_flip_completion = 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* pin the new buffer */
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+ work->old_rbo, rbo);
+
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ /* Only 27 bit offset for legacy CRTC */
+ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ /* crtc offset is from display base addr not FB location */
+ base -= radeon_crtc->legacy_display_base_addr;
+ pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev)) {
+ base &= ~0x7ff;
+ } else {
+ int byteshift = fb->bits_per_pixel >> 4;
+ int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+ }
+ } else {
+ int offset = crtc->y * pitch_pixels + crtc->x;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ default:
+ offset *= 1;
+ break;
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ }
+ base += offset;
+ }
+ base &= ~7;
+ }
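+
+ /* Worked example for the linear (non-tiled) path above, with
+ * hypothetical numbers: a 32 bpp scanout with pitch_pixels = 1024
+ * panned to (x, y) = (8, 16) gives
+ *
+ * offset = (16 * 1024 + 8) * 4 = 65568 bytes
+ *
+ * which is added to base and then aligned down to 8 bytes.
+ */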
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* update crtc fb */
+ crtc->fb = fb;
+
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
+
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+
+ return 0;
+
+pflip_cleanup1:
+ if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ if (unlikely(radeon_bo_unpin(rbo) != 0)) {
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ }
+ radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+unlock_free:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+ radeon_fence_unref(&work->fence);
+ kfree(work);
+
+ return r;
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+ .cursor_set = radeon_crtc_cursor_set,
+ .cursor_move = radeon_crtc_cursor_move,
+ .gamma_set = radeon_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = radeon_crtc_destroy,
+ .page_flip = radeon_crtc_page_flip,
+};
+
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc;
+ int i;
+
+ radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (radeon_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+ radeon_crtc->crtc_id = index;
+ rdev->mode_info.crtcs[index] = radeon_crtc;
+
+#if 0
+ radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+ radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+ radeon_crtc->mode_set.num_connectors = 0;
+#endif
+
+ for (i = 0; i < 256; i++) {
+ radeon_crtc->lut_r[i] = i << 2;
+ radeon_crtc->lut_g[i] = i << 2;
+ radeon_crtc->lut_b[i] = i << 2;
+ }
+
+ if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+ radeon_atombios_init_crtc(dev, radeon_crtc);
+ else
+ radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[37] = {
+ "NONE",
+ "INTERNAL_LVDS",
+ "INTERNAL_TMDS1",
+ "INTERNAL_TMDS2",
+ "INTERNAL_DAC1",
+ "INTERNAL_DAC2",
+ "INTERNAL_SDVOA",
+ "INTERNAL_SDVOB",
+ "SI170B",
+ "CH7303",
+ "CH7301",
+ "INTERNAL_DVO1",
+ "EXTERNAL_SDVOA",
+ "EXTERNAL_SDVOB",
+ "TITFP513",
+ "INTERNAL_LVTM1",
+ "VT1623",
+ "HDMI_SI1930",
+ "HDMI_INTERNAL",
+ "INTERNAL_KLDSCP_TMDS1",
+ "INTERNAL_KLDSCP_DVO1",
+ "INTERNAL_KLDSCP_DAC1",
+ "INTERNAL_KLDSCP_DAC2",
+ "SI178",
+ "MVPU_FPGA",
+ "INTERNAL_DDI",
+ "VT1625",
+ "HDMI_SI1932",
+ "DP_AN9801",
+ "DP_DP501",
+ "INTERNAL_UNIPHY",
+ "INTERNAL_KLDSCP_LVTMA",
+ "INTERNAL_UNIPHY1",
+ "INTERNAL_UNIPHY2",
+ "NUTMEG",
+ "TRAVIS",
+ "INTERNAL_VCE"
+};
+
+static const char *hpd_names[6] = {
+ "HPD1",
+ "HPD2",
+ "HPD3",
+ "HPD4",
+ "HPD5",
+ "HPD6",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ uint32_t devices;
+ int i = 0;
+
+ DRM_INFO("Radeon Display Connectors\n");
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ DRM_INFO("Connector %d:\n", i);
+ DRM_INFO(" %s\n", drm_get_connector_name(connector));
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
+ if (radeon_connector->ddc_bus) {
+ DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ radeon_connector->ddc_bus->rec.mask_clk_reg,
+ radeon_connector->ddc_bus->rec.mask_data_reg,
+ radeon_connector->ddc_bus->rec.a_clk_reg,
+ radeon_connector->ddc_bus->rec.a_data_reg,
+ radeon_connector->ddc_bus->rec.en_clk_reg,
+ radeon_connector->ddc_bus->rec.en_data_reg,
+ radeon_connector->ddc_bus->rec.y_clk_reg,
+ radeon_connector->ddc_bus->rec.y_data_reg);
+ if (radeon_connector->router.ddc_valid)
+ DRM_INFO(" DDC Router 0x%x/0x%x\n",
+ radeon_connector->router.ddc_mux_control_pin,
+ radeon_connector->router.ddc_mux_state);
+ if (radeon_connector->router.cd_valid)
+ DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
+ radeon_connector->router.cd_mux_control_pin,
+ radeon_connector->router.cd_mux_state);
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+ }
+ DRM_INFO(" Encoders:\n");
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ devices = radeon_encoder->devices & radeon_connector->devices;
+ if (devices) {
+ if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+ DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+ DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+ DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+ DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+ DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+ DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+ DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+ DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+ DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_TV1_SUPPORT)
+ DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_CV_SUPPORT)
+ DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ }
+ }
+ i++;
+ }
+}
+
+static bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ bool ret = false;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios) {
+ ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+ if (!ret)
+ ret = radeon_get_atom_connector_info_from_object_table(dev);
+ } else {
+ ret = radeon_get_legacy_connector_info_from_bios(dev);
+ if (!ret)
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
+ } else {
+ if (!ASIC_IS_AVIVO(rdev))
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
+ if (ret) {
+ radeon_setup_encoder_clones(dev);
+ radeon_print_display_setup(dev);
+ }
+
+ return ret;
+}
+
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ret = 0;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else {
+ if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ }
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ return 0;
+}
+
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+ u32 target_clock,
+ u32 post_div,
+ u32 ref_div,
+ u32 *fb_div,
+ u32 *frac_fb_div)
+{
+ u32 tmp = post_div * ref_div;
+
+ tmp *= target_clock;
+ *fb_div = tmp / pll->reference_freq;
+ *frac_fb_div = tmp % pll->reference_freq;
+
+ if (*fb_div > pll->max_feedback_div)
+ *fb_div = pll->max_feedback_div;
+ else if (*fb_div < pll->min_feedback_div)
+ *fb_div = pll->min_feedback_div;
+}
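+
+/* For reference, a sketch of the math above with made-up numbers:
+ * with reference_freq = 2700 (27 MHz in 10 kHz units), target_clock =
+ * 10800 (108 MHz), post_div = 6 and ref_div = 1:
+ *
+ * tmp = 6 * 1 * 10800 = 64800
+ * fb_div = 64800 / 2700 = 24, frac_fb_div = 0
+ *
+ * and indeed 2700 * 24 / (1 * 6) = 10800, the requested clock.
+ */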
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+ u32 target_clock)
+{
+ u32 vco, post_div, tmp;
+
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ return pll->post_div;
+
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_min;
+ else
+ vco = pll->pll_out_min;
+ } else {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_max;
+ else
+ vco = pll->pll_out_max;
+ }
+
+ post_div = vco / target_clock;
+ tmp = vco % target_clock;
+
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (tmp)
+ post_div++;
+ } else {
+ if (!tmp)
+ post_div--;
+ }
+
+ if (post_div > pll->max_post_div)
+ post_div = pll->max_post_div;
+ else if (post_div < pll->min_post_div)
+ post_div = pll->min_post_div;
+
+ return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ u32 freq,
+ u32 *dot_clock_p,
+ u32 *fb_div_p,
+ u32 *frac_fb_div_p,
+ u32 *ref_div_p,
+ u32 *post_div_p)
+{
+ u32 target_clock = freq / 10;
+ u32 post_div = avivo_get_post_div(pll, target_clock);
+ u32 ref_div = pll->min_ref_div;
+ u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ ref_div = pll->reference_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+ frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+ if (frac_fb_div >= 5) {
+ frac_fb_div -= 5;
+ frac_fb_div = frac_fb_div / 10;
+ frac_fb_div++;
+ }
+ if (frac_fb_div >= 10) {
+ fb_div++;
+ frac_fb_div = 0;
+ }
+ } else {
+ while (ref_div <= pll->max_ref_div) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+ &fb_div, &frac_fb_div);
+ if (frac_fb_div >= (pll->reference_freq / 2))
+ fb_div++;
+ frac_fb_div = 0;
+ tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+ tmp = (tmp * 10000) / target_clock;
+
+ if (tmp > (10000 + MAX_TOLERANCE))
+ ref_div++;
+ else if (tmp >= (10000 - MAX_TOLERANCE))
+ break;
+ else
+ ref_div++;
+ }
+ }
+
+ *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+ (ref_div * post_div * 10);
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = frac_fb_div;
+ *ref_div_p = ref_div;
+ *post_div_p = post_div;
+ DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
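+
+/* The fractional rounding above, traced with hypothetical values:
+ * suppose avivo_get_fb_div() leaves a remainder worth 0.37 of one
+ * feedback step. Then
+ *
+ * frac_fb_div = (100 * frac) / reference_freq = 37
+ * 37 >= 5, so: 37 - 5 = 32; 32 / 10 = 3; 3 + 1 = 4
+ *
+ * i.e. 0.37 rounds to 4 tenths, matching the single decimal digit the
+ * fractional feedback divider encodes (see the dot clock formula).
+ */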
+
+/* pre-avivo */
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+ uint64_t mod;
+
+ n += d / 2;
+
+ mod = do_div(n, d);
+ return n;
+}
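+
+/* radeon_div() is round-to-nearest unsigned division: adding d/2
+ * before do_div() biases the truncation, e.g. radeon_div(7, 2)
+ * computes (7 + 1) / 2 = 4 rather than the truncated 3.
+ */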
+
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ uint32_t min_ref_div = pll->min_ref_div;
+ uint32_t max_ref_div = pll->max_ref_div;
+ uint32_t min_post_div = pll->min_post_div;
+ uint32_t max_post_div = pll->max_post_div;
+ uint32_t min_fractional_feed_div = 0;
+ uint32_t max_fractional_feed_div = 0;
+ uint32_t best_vco = pll->best_vco;
+ uint32_t best_post_div = 1;
+ uint32_t best_ref_div = 1;
+ uint32_t best_feedback_div = 1;
+ uint32_t best_frac_feedback_div = 0;
+ uint32_t best_freq = -1;
+ uint32_t best_error = 0xffffffff;
+ uint32_t best_vco_diff = 1;
+ uint32_t post_div;
+ u32 pll_out_min, pll_out_max;
+
+ DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ freq = freq * 1000;
+
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
+
+ if (pll_out_min > 64800)
+ pll_out_min = 64800;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ min_ref_div = max_ref_div = pll->reference_div;
+ else {
+ while (min_ref_div < max_ref_div-1) {
+ uint32_t mid = (min_ref_div + max_ref_div) / 2;
+ uint32_t pll_in = pll->reference_freq / mid;
+ if (pll_in < pll->pll_in_min)
+ max_ref_div = mid;
+ else if (pll_in > pll->pll_in_max)
+ min_ref_div = mid;
+ else
+ break;
+ }
+ }
+
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ min_post_div = max_post_div = pll->post_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ min_fractional_feed_div = pll->min_frac_feedback_div;
+ max_fractional_feed_div = pll->max_frac_feedback_div;
+ }
+
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+ uint32_t ref_div;
+
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ continue;
+
+ /* legacy radeons only have a few post_divs */
+ if (pll->flags & RADEON_PLL_LEGACY) {
+ if ((post_div == 5) ||
+ (post_div == 7) ||
+ (post_div == 9) ||
+ (post_div == 10) ||
+ (post_div == 11) ||
+ (post_div == 13) ||
+ (post_div == 14) ||
+ (post_div == 15))
+ continue;
+ }
+
+ for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+ uint32_t feedback_div, current_freq = 0, error, vco_diff;
+ uint32_t pll_in = pll->reference_freq / ref_div;
+ uint32_t min_feed_div = pll->min_feedback_div;
+ uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+ if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+ continue;
+
+ while (min_feed_div < max_feed_div) {
+ uint32_t vco;
+ uint32_t min_frac_feed_div = min_fractional_feed_div;
+ uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+ uint32_t frac_feedback_div;
+ uint64_t tmp;
+
+ feedback_div = (min_feed_div + max_feed_div) / 2;
+
+ tmp = (uint64_t)pll->reference_freq * feedback_div;
+ vco = radeon_div(tmp, ref_div);
+
+ if (vco < pll_out_min) {
+ min_feed_div = feedback_div + 1;
+ continue;
+ } else if (vco > pll_out_max) {
+ max_feed_div = feedback_div;
+ continue;
+ }
+
+ while (min_frac_feed_div < max_frac_feed_div) {
+ frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+ tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+ tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+ current_freq = radeon_div(tmp, ref_div * post_div);
+
+ if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+ if (freq < current_freq)
+ error = 0xffffffff;
+ else
+ error = freq - current_freq;
+ } else
+ error = abs(current_freq - freq);
+ vco_diff = abs(vco - best_vco);
+
+ if ((best_vco == 0 && error < best_error) ||
+ (best_vco != 0 &&
+ ((best_error > 100 && error < best_error - 100) ||
+ (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ } else if (current_freq == freq) {
+ if (best_freq == -1) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ best_post_div = post_div;
+ best_ref_div = ref_div;
+ best_feedback_div = feedback_div;
+ best_frac_feedback_div = frac_feedback_div;
+ best_freq = current_freq;
+ best_error = error;
+ best_vco_diff = vco_diff;
+ }
+ }
+ if (current_freq < freq)
+ min_frac_feed_div = frac_feedback_div + 1;
+ else
+ max_frac_feed_div = frac_feedback_div;
+ }
+ if (current_freq < freq)
+ min_feed_div = feedback_div + 1;
+ else
+ max_feed_div = feedback_div;
+ }
+ }
+ }
+
+ *dot_clock_p = best_freq / 10000;
+ *fb_div_p = best_feedback_div;
+ *frac_fb_div_p = best_frac_feedback_div;
+ *ref_div_p = best_ref_div;
+ *post_div_p = best_post_div;
+ DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ (long long)freq,
+ best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+ best_ref_div, best_post_div);
+}
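+
+/* Unit bookkeeping for the search above, as read from the code (so
+ * treat this as a sketch): freq arrives in kHz and is scaled by 1000
+ * to Hz; current_freq = reference_freq * 10000 * fb_div / (ref_div *
+ * post_div) is also in Hz since reference_freq is in 10 kHz units,
+ * so the two compare directly, and the winner is divided by 10000 to
+ * yield a dot clock in 10 kHz units.
+ */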
+
+static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+ if (radeon_fb->obj) {
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
+ }
+ drm_framebuffer_cleanup(fb);
+ kfree(radeon_fb);
+}
+
+static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+ return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+ .destroy = radeon_user_framebuffer_destroy,
+ .create_handle = radeon_user_framebuffer_create_handle,
+};
+
+int
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+ rfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ if (ret) {
+ rfb->obj = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static struct drm_framebuffer *
+radeon_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (obj == NULL) {
+ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handles[0]);
+ return ERR_PTR(-ENOENT);
+ }
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(radeon_fb);
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+ }
+
+ return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_fb_output_poll_changed(rdev);
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+ .fb_create = radeon_user_framebuffer_create,
+ .output_poll_changed = radeon_output_poll_changed
+};
+
+static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+{ { 0, "driver" },
+ { 1, "bios" },
+};
+
+static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+{ { TV_STD_NTSC, "ntsc" },
+ { TV_STD_PAL, "pal" },
+ { TV_STD_PAL_M, "pal-m" },
+ { TV_STD_PAL_60, "pal-60" },
+ { TV_STD_NTSC_J, "ntsc-j" },
+ { TV_STD_SCART_PAL, "scart-pal" },
+ { TV_STD_PAL_CN, "pal-cn" },
+ { TV_STD_SECAM, "secam" },
+};
+
+static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{ { UNDERSCAN_OFF, "off" },
+ { UNDERSCAN_ON, "on" },
+ { UNDERSCAN_AUTO, "auto" },
+};
+
+static int radeon_modeset_create_props(struct radeon_device *rdev)
+{
+ int sz;
+
+ if (rdev->is_atom_bios) {
+ rdev->mode_info.coherent_mode_property =
+ drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
+ if (!rdev->mode_info.coherent_mode_property)
+ return -ENOMEM;
+ }
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
+ rdev->mode_info.tmds_pll_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "tmds_pll",
+ radeon_tmds_pll_enum_list, sz);
+ }
+
+ rdev->mode_info.load_detect_property =
+ drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+ if (!rdev->mode_info.load_detect_property)
+ return -ENOMEM;
+
+ drm_mode_create_scaling_mode_property(rdev->ddev);
+
+ sz = ARRAY_SIZE(radeon_tv_std_enum_list);
+ rdev->mode_info.tv_std_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "tv standard",
+ radeon_tv_std_enum_list, sz);
+
+ sz = ARRAY_SIZE(radeon_underscan_enum_list);
+ rdev->mode_info.underscan_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "underscan",
+ radeon_underscan_enum_list, sz);
+
+ rdev->mode_info.underscan_hborder_property =
+ drm_property_create_range(rdev->ddev, 0,
+ "underscan hborder", 0, 128);
+ if (!rdev->mode_info.underscan_hborder_property)
+ return -ENOMEM;
+
+ rdev->mode_info.underscan_vborder_property =
+ drm_property_create_range(rdev->ddev, 0,
+ "underscan vborder", 0, 128);
+ if (!rdev->mode_info.underscan_vborder_property)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+ /* adjustment options for the display watermarks */
+ if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+ /* set display priority to high for r3xx, rv515 chips
+ * this avoids flickering due to underflow to the
+ * display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
+ */
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
+ rdev->disp_priority = 2;
+ else
+ rdev->disp_priority = 0;
+ } else
+ rdev->disp_priority = radeon_disp_priority;
+}
+
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+ rdev->mode_info.afmt[i] = NULL;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 has 6 audio blocks tied to DIG encoders */
+ /* DCE4.1 has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
+ }
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3.x has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 has at least 1 routable audio block */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ /* r6xx has 2 routable audio blocks */
+ if (rdev->family >= CHIP_R600) {
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ }
+ }
+}
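+
+/* Summary of the allocation above, derived from the code rather than
+ * a hardware spec: DCE2 gets 1 block (2 from R600 on), DCE3.x gets 2,
+ * DCE4.1 gets 2, other DCE4/5 parts get 6, and DCE6 is still a todo.
+ */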
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+ kfree(rdev->mode_info.afmt[i]);
+ rdev->mode_info.afmt[i] = NULL;
+ }
+}
+
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+ int i;
+ int ret;
+
+ drm_mode_config_init(rdev->ddev);
+ rdev->mode_info.mode_config_initialized = true;
+
+ rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->ddev->mode_config.max_width = 16384;
+ rdev->ddev->mode_config.max_height = 16384;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ rdev->ddev->mode_config.max_width = 8192;
+ rdev->ddev->mode_config.max_height = 8192;
+ } else {
+ rdev->ddev->mode_config.max_width = 4096;
+ rdev->ddev->mode_config.max_height = 4096;
+ }
+
+ rdev->ddev->mode_config.preferred_depth = 24;
+ rdev->ddev->mode_config.prefer_shadow = 1;
+
+ rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+ ret = radeon_modeset_create_props(rdev);
+ if (ret) {
+ return ret;
+ }
+
+ /* init i2c buses */
+ radeon_i2c_init(rdev);
+
+ /* check combios for a valid hardcoded EDID - Sun servers */
+ if (!rdev->is_atom_bios) {
+ /* check for hardcoded EDID in BIOS */
+ radeon_combios_check_hardcoded_edid(rdev);
+ }
+
+ /* allocate crtcs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ radeon_crtc_init(rdev->ddev, i);
+ }
+
+ /* okay we should have all the bios connectors */
+ ret = radeon_setup_enc_conn(rdev->ddev);
+ if (!ret) {
+ return ret;
+ }
+
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
+ radeon_atom_encoder_init(rdev);
+ radeon_atom_disp_eng_pll_init(rdev);
+ }
+
+ /* initialize hpd */
+ radeon_hpd_init(rdev);
+
+ /* setup afmt */
+ radeon_afmt_init(rdev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+
+ return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+ radeon_fbdev_fini(rdev);
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
+
+ if (rdev->mode_info.mode_config_initialized) {
+ radeon_afmt_fini(rdev);
+ drm_kms_helper_poll_fini(rdev->ddev);
+ radeon_hpd_fini(rdev);
+ drm_mode_config_cleanup(rdev->ddev);
+ rdev->mode_info.mode_config_initialized = false;
+ }
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
+}
+
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
+{
+ /* try to guess whether this is a TV or a monitor */
+ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+ (mode->vdisplay == 576) || /* 576p */
+ (mode->vdisplay == 720) || /* 720p */
+ (mode->vdisplay == 1080)) /* 1080p */
+ return true;
+ else
+ return false;
+}
+
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_encoder *radeon_encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ bool first = true;
+ u32 src_v = 1, dst_v = 1;
+ u32 src_h = 1, dst_h = 1;
+
+ radeon_crtc->h_border = 0;
+ radeon_crtc->v_border = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ radeon_encoder = to_radeon_encoder(encoder);
+ connector = radeon_get_connector_for_encoder(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
+ if (first) {
+ /* set scaling */
+ if (radeon_encoder->rmx_type == RMX_OFF)
+ radeon_crtc->rmx_type = RMX_OFF;
+ else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+ mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+ radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ else
+ radeon_crtc->rmx_type = RMX_OFF;
+ /* copy native mode */
+ memcpy(&radeon_crtc->native_mode,
+ &radeon_encoder->native_mode,
+ sizeof(struct drm_display_mode));
+ src_v = crtc->mode.vdisplay;
+ dst_v = radeon_crtc->native_mode.vdisplay;
+ src_h = crtc->mode.hdisplay;
+ dst_h = radeon_crtc->native_mode.hdisplay;
+
+ /* fix up for overscan on hdmi */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+ ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+ ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+ drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ is_hdtv_mode(mode)))) {
+ if (radeon_encoder->underscan_hborder != 0)
+ radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+ else
+ radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+ if (radeon_encoder->underscan_vborder != 0)
+ radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+ else
+ radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+ radeon_crtc->rmx_type = RMX_FULL;
+ src_v = crtc->mode.vdisplay;
+ dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+ src_h = crtc->mode.hdisplay;
+ dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+ }
+ first = false;
+ } else {
+ if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
+ /* WARNING: Right now this can't happen but
+ * in the future we need to check that the scaling
+ * is consistent across all encoders (i.e. every
+ * encoder can work with the same scaling).
+ */
+ DRM_ERROR("Scaling not consistent across encoders.\n");
+ return false;
+ }
+ }
+ }
+ if (radeon_crtc->rmx_type != RMX_OFF) {
+ fixed20_12 a, b;
+ a.full = dfixed_const(src_v);
+ b.full = dfixed_const(dst_v);
+ radeon_crtc->vsc.full = dfixed_div(a, b);
+ a.full = dfixed_const(src_h);
+ b.full = dfixed_const(dst_h);
+ radeon_crtc->hsc.full = dfixed_div(a, b);
+ } else {
+ radeon_crtc->vsc.full = dfixed_const(1);
+ radeon_crtc->hsc.full = dfixed_const(1);
+ }
+ return true;
+}
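+
+/* vsc/hsc are 20.12 fixed point source/destination ratios. A sketch
+ * with hypothetical sizes: scaling a 1920-wide mode onto a 1280-wide
+ * native panel gives
+ *
+ * hsc = dfixed_div(1920, 1280) = 1.5 -> 0x1800 in 20.12 format
+ *
+ * i.e. each output pixel advances 1.5 source pixels.
+ */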
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param dev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should be stored.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+{
+ u32 stat_crtc = 0, vbl = 0, position = 0;
+ int vbl_start, vbl_end, vtotal, ret = 0;
+ bool in_vbl = true;
+
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 2) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 3) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 4) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 5) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (crtc == 0) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ } else {
+ /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+ if (crtc == 0) {
+ /* Assume vbl_end == 0, get vbl_start from
+ * upper 16 bits.
+ */
+ vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ /* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+ position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ if (crtc == 1) {
+ vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+ RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+ position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
+ in_vbl = false;
+
+ ret |= DRM_SCANOUTPOS_VALID;
+ }
+ }
+
+ /* Decode into vertical and horizontal scanout position. */
+ *vpos = position & 0x1fff;
+ *hpos = (position >> 16) & 0x1fff;
+
+ /* Valid vblank area boundaries from gpu retrieved? */
+ if (vbl > 0) {
+ /* Yes: Decode. */
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+ } else {
+ /* No: fake something reasonable that gives at least OK results. */
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+ vbl_end = 0;
+ }
+
+ /* Test scanout position against vblank region. */
+ if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+ in_vbl = false;
+
+ /* Check if inside vblank area and apply corrective offsets:
+ * vpos will then be >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+
+ /* Inside "upper part" of vblank area? Apply corrective offset if so: */
+ if (in_vbl && (*vpos >= vbl_start)) {
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+ *vpos = *vpos - vtotal;
+ }
+
+ /* Correct for shifted end of vbl at vbl_end. */
+ *vpos = *vpos - vbl_end;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
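+
+/* Correction example with illustrative 1080p timing (vtotal = 1125,
+ * vbl_start = 1080, vbl_end = 0): a raw vpos of 1085 lies in the
+ * "upper part" of vblank, so vpos = 1085 - 1125 = -40, i.e. 40
+ * scanlines remain until active scanout starts, as documented in the
+ * comment block above.
+ */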
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
new file mode 100644
index 0000000..8357aad
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -0,0 +1,505 @@
+/**
+ * \file radeon_drv.c
+ * ATI Radeon driver
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include <drm/drm_pciids.h>
+#include <linux/console.h>
+#include <linux/module.h>
+
+
+/*
+ * KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ * - 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * - 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
+ * - 2.10.0 - fusion 2D tiling
+ * - 2.11.0 - backend map, initial compute support for the CS checker
+ * - 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * - 2.13.0 - virtual memory support, streamout
+ * - 2.14.0 - add evergreen tiling information
+ * - 2.15.0 - add max_pipes query
+ * - 2.16.0 - fix evergreen 2D tiled surface calculation
+ * - 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * - 2.18.0 - r600-eg: allow "invalid" DB formats
+ * - 2.19.0 - r600-eg: MSAA textures
+ * - 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ * - 2.21.0 - r600-r700: FMASK and CMASK
+ * - 2.22.0 - r600 only: RESOLVE_BOX allowed
+ * - 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ * - 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * - 2.25.0 - eg+: new info request for num SE and num SH
+ * - 2.26.0 - r600-eg: fix htile size computation
+ * - 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * - 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * - 2.29.0 - R500 FP16 color clear registers
+ * - 2.30.0 - fix for FMASK texturing
+ * - 2.31.0 - Add fastfb support for rs690
+ * - 2.32.0 - new info request for rings working
+ * - 2.33.0 - Add SI tiling mode array query
+ */
+#define KMS_DRIVER_MAJOR 2
+#define KMS_DRIVER_MINOR 33
+#define KMS_DRIVER_PATCHLEVEL 0
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int radeon_driver_unload_kms(struct drm_device *dev);
+int radeon_driver_firstopen_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv);
+void radeon_driver_preclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv);
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+int radeon_resume_kms(struct drm_device *dev);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int radeon_gem_object_init(struct drm_gem_object *obj);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor);
+void radeon_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
+int radeon_no_wb;
+int radeon_modeset = -1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_testing = 0;
+int radeon_connector_table = 0;
+int radeon_tv = 1;
+int radeon_audio = 0;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = -1;
+int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
+
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to set up in megabytes (32, 64, etc)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+
+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+module_param_named(tv, radeon_tv, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
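+
+/* The parameters above can be given at module load time or on the
+ * kernel command line; hypothetical examples, values for illustration
+ * only:
+ *
+ * modprobe radeon modeset=1 audio=1
+ * radeon.gartsize=64 radeon.msi=0 (kernel command line)
+ */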
+
+static struct pci_device_id pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CPTCFG_DRM_RADEON_UMS
+
+static int radeon_suspend(struct drm_device *dev, pm_message_t state)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return 0;
+
+ /* Disable *all* interrupts */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+ return 0;
+}
+
+static int radeon_resume(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return 0;
+
+ /* Restore interrupt registers */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ return 0;
+}
+
+static const struct file_operations radeon_driver_old_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver driver_old = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
+ .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
+ .load = radeon_driver_load,
+ .firstopen = radeon_driver_firstopen,
+ .open = radeon_driver_open,
+ .preclose = radeon_driver_preclose,
+ .postclose = radeon_driver_postclose,
+ .lastclose = radeon_driver_lastclose,
+ .unload = radeon_driver_unload,
+ .suspend = radeon_suspend,
+ .resume = radeon_resume,
+ .get_vblank_counter = radeon_get_vblank_counter,
+ .enable_vblank = radeon_enable_vblank,
+ .disable_vblank = radeon_disable_vblank,
+ .master_create = radeon_master_create,
+ .master_destroy = radeon_master_destroy,
+ .irq_preinstall = radeon_driver_irq_preinstall,
+ .irq_postinstall = radeon_driver_irq_postinstall,
+ .irq_uninstall = radeon_driver_irq_uninstall,
+ .irq_handler = radeon_driver_irq_handler,
+ .ioctls = radeon_ioctls,
+ .dma_ioctl = radeon_cp_buffers,
+ .fops = &radeon_driver_old_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+#endif
+
+static struct drm_driver kms_driver;
+
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+ kfree(ap);
+
+ return 0;
+}
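+
+/* A note on the helper above: it describes BAR 0 as a single aperture
+ * and asks the fb layer to evict any firmware framebuffer (offb,
+ * efifb, vesafb and friends) scanning out of it, so the resource can
+ * be taken over by this driver.
+ */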
+
+static int radeon_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ /* Get rid of things like offb */
+ ret = radeon_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
+
+ return drm_get_pci_dev(pdev, ent, &kms_driver);
+}
+
+static void
+radeon_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int
+radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ return radeon_suspend_kms(dev, state);
+}
+
+static int
+radeon_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ return radeon_resume_kms(dev);
+}
+
+static const struct file_operations radeon_driver_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
+static struct drm_driver kms_driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME,
+ .dev_priv_size = 0,
+ .load = radeon_driver_load_kms,
+ .firstopen = radeon_driver_firstopen_kms,
+ .open = radeon_driver_open_kms,
+ .preclose = radeon_driver_preclose_kms,
+ .postclose = radeon_driver_postclose_kms,
+ .lastclose = radeon_driver_lastclose_kms,
+ .unload = radeon_driver_unload_kms,
+ .suspend = radeon_suspend_kms,
+ .resume = radeon_resume_kms,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+ .get_scanout_position = radeon_get_crtc_scanoutpos,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = radeon_debugfs_init,
+ .debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+ .irq_preinstall = radeon_driver_irq_preinstall_kms,
+ .irq_postinstall = radeon_driver_irq_postinstall_kms,
+ .irq_uninstall = radeon_driver_irq_uninstall_kms,
+ .irq_handler = radeon_driver_irq_handler_kms,
+ .ioctls = radeon_ioctls_kms,
+ .gem_init_object = radeon_gem_object_init,
+ .gem_free_object = radeon_gem_object_free,
+ .gem_open_object = radeon_gem_object_open,
+ .gem_close_object = radeon_gem_object_close,
+ .dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
+ .fops = &radeon_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = radeon_gem_prime_pin,
+ .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_vmap = radeon_gem_prime_vmap,
+ .gem_prime_vunmap = radeon_gem_prime_vunmap,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = KMS_DRIVER_MAJOR,
+ .minor = KMS_DRIVER_MINOR,
+ .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+#ifdef CPTCFG_DRM_RADEON_UMS
+static struct pci_driver radeon_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+#endif
+
+static struct pci_driver radeon_kms_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+};
+
+static int __init radeon_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && radeon_modeset == -1) {
+ DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+ radeon_modeset = 0;
+ }
+#endif
+ /* default to kernel modesetting unless it was disabled above */
+ if (radeon_modeset == -1)
+ radeon_modeset = 1;
+
+ if (radeon_modeset == 1) {
+ DRM_INFO("radeon kernel modesetting enabled.\n");
+ driver = &kms_driver;
+ pdriver = &radeon_kms_pci_driver;
+ driver->driver_features |= DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_kms_ioctl;
+ radeon_register_atpx_handler();
+
+ } else {
+#ifdef CPTCFG_DRM_RADEON_UMS
+ DRM_INFO("radeon userspace modesetting enabled.\n");
+ driver = &driver_old;
+ pdriver = &radeon_pci_driver;
+ driver->driver_features &= ~DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_ioctl;
+#else
+ DRM_ERROR("No UMS support in radeon module!\n");
+ return -EINVAL;
+#endif
+ }
+
+ /* an explicit modeset= module parameter still overrides the vga console default */
+ return drm_pci_init(driver, pdriver);
+}
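+
+/*
+ * The modeset switch above is driven by the driver's "modeset" module
+ * parameter (-1 = auto-detect, 0 = userspace modesetting, 1 = kernel
+ * modesetting), so the path can be forced at load time, e.g.:
+ *
+ *   modprobe radeon modeset=0
+ *   modprobe radeon modeset=1
+ */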
+
+static void __exit radeon_exit(void)
+{
+ drm_pci_exit(driver, pdriver);
+ radeon_unregister_atpx_handler();
+}
+
+module_init(radeon_init);
+module_exit(radeon_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
new file mode 100644
index 0000000..b9ca18c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -0,0 +1,2164 @@
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "radeon_family.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME "radeon"
+#define DRIVER_DESC "ATI Radeon"
+#define DRIVER_DATE "20080528"
+
+/* Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ * - Add stencil capability to clear ioctl (gareth, keith)
+ * - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ * - Add support for new radeon packets (keith)
+ * - Add getparam ioctl (keith)
+ * - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ * - Add r200 function to init ioctl
+ * - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ * Add irq handler (won't be turned on unless X server knows to)
+ * Add irq ioctls and irq_active getparam.
+ * Add wait command for cmdbuf ioctl
+ * Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ * and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ * Add 'GET' queries for starting additional clients on different VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ * Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ * clients use to tell the DRM where they think the framebuffer is
+ * located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ * and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ * (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ * - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ * - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ * - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ * texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
+ * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ * new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
+ * 1.29- R500 3D cmd buffer support
+ * 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 33
+#define DRIVER_PATCHLEVEL 0
+
+/* The rest of the file is DEPRECATED! */
+#ifdef CPTCFG_DRM_RADEON_UMS
+
+enum radeon_cp_microcode_version {
+ UCODE_R100,
+ UCODE_R200,
+ UCODE_R300,
+};
+
+typedef struct drm_radeon_freelist {
+ unsigned int age;
+ struct drm_buf *buf;
+ struct drm_radeon_freelist *next;
+ struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+ u32 *start;
+ u32 *end;
+ int size;
+ int size_l2qw;
+
+ int rptr_update; /* Double Words */
+ int rptr_update_l2qw; /* log2 Quad Words */
+
+ int fetch_size; /* Double Words */
+ int fetch_size_l2ow; /* log2 Oct Words */
+
+ u32 tail;
+ u32 tail_mask;
+ int space;
+
+ int high_mark;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+ u32 rb3d_cntl;
+ u32 rb3d_zstencilcntl;
+ u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+struct drm_radeon_driver_file_fields {
+ int64_t radeon_fb_delta;
+};
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct radeon_surface {
+ int refcount;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+};
+
+struct radeon_virt_surface {
+ int surface_index;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+ struct drm_file *file_priv;
+#define PCIGART_FILE_PRIV ((void *) -1L)
+};
+
+#define RADEON_FLUSH_EMITED (1 << 0)
+#define RADEON_PURGE_EMITED (1 << 1)
+
+struct drm_radeon_master_private {
+ drm_local_map_t *sarea;
+ drm_radeon_sarea_t *sarea_priv;
+};
+
+typedef struct drm_radeon_private {
+ drm_radeon_ring_buffer_t ring;
+
+ u32 fb_location;
+ u32 fb_size;
+ int new_memmap;
+
+ int gart_size;
+ u32 gart_vm_start;
+ unsigned long gart_buffers_offset;
+
+ int cp_mode;
+ int cp_running;
+
+ drm_radeon_freelist_t *head;
+ drm_radeon_freelist_t *tail;
+ int last_buf;
+ int writeback_works;
+
+ int usec_timeout;
+
+ int microcode_version;
+
+ struct {
+ u32 boxes;
+ int freelist_timeouts;
+ int freelist_loops;
+ int requested_bufs;
+ int last_frame_reads;
+ int last_clear_reads;
+ int clears;
+ int texture_uploads;
+ } stats;
+
+ int do_boxes;
+ int page_flipping;
+
+ u32 color_fmt;
+ unsigned int front_offset;
+ unsigned int front_pitch;
+ unsigned int back_offset;
+ unsigned int back_pitch;
+
+ u32 depth_fmt;
+ unsigned int depth_offset;
+ unsigned int depth_pitch;
+
+ u32 front_pitch_offset;
+ u32 back_pitch_offset;
+ u32 depth_pitch_offset;
+
+ drm_radeon_depth_clear_t depth_clear;
+
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+
+ drm_local_map_t *sarea;
+ drm_local_map_t *cp_ring;
+ drm_local_map_t *ring_rptr;
+ drm_local_map_t *gart_textures;
+
+ struct mem_block *gart_heap;
+ struct mem_block *fb_heap;
+
+ /* SW interrupt */
+ wait_queue_head_t swi_queue;
+ atomic_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ uint32_t r500_disp_irq_reg;
+
+ struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+ struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
+
+ unsigned long pcigart_offset;
+ unsigned int pcigart_offset_set;
+ struct drm_ati_pcigart_info gart_info;
+
+ u32 scratch_ages[5];
+
+ int have_z_offset;
+
+ /* starting from here on, data is preserved across an open */
+ uint32_t flags; /* see radeon_chip_flags */
+ resource_size_t fb_aper_offset;
+
+ int num_gb_pipes;
+ int num_z_pipes;
+ int track_flush;
+ drm_local_map_t *mmio;
+
+ /* r6xx/r7xx pipe/shader config */
+ int r600_max_pipes;
+ int r600_max_tile_pipes;
+ int r600_max_simds;
+ int r600_max_backends;
+ int r600_max_gprs;
+ int r600_max_threads;
+ int r600_max_stack_entries;
+ int r600_max_hw_contexts;
+ int r600_max_gs_threads;
+ int r600_sx_max_export_size;
+ int r600_sx_max_export_pos_size;
+ int r600_sx_max_export_smx_size;
+ int r600_sq_num_cf_insts;
+ int r700_sx_num_of_sets;
+ int r700_sc_prim_fifo_size;
+ int r700_sc_hiz_tile_fifo_size;
+ int r700_sc_earlyz_tile_fifo_fize;
+ int r600_group_size;
+ int r600_npipes;
+ int r600_nbanks;
+
+ struct mutex cs_mutex;
+ u32 cs_id_scnt;
+ u32 cs_id_wcnt;
+ /* r6xx/r7xx drm blit vertex buffer */
+ struct drm_buf *blit_vb;
+
+ /* firmware */
+ const struct firmware *me_fw, *pfp_fw;
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+ u32 age;
+} drm_radeon_buf_priv_t;
+
+struct drm_buffer;
+
+typedef struct drm_radeon_kcmd_buffer {
+ int bufsz;
+ struct drm_buffer *buffer;
+ int nbox;
+ struct drm_clip_rect __user *boxes;
+} drm_radeon_kcmd_buffer_t;
+
+extern int radeon_no_wb;
+extern struct drm_ioctl_desc radeon_ioctls[];
+extern int radeon_max_ioctl;
+
+extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv);
+extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val);
+
+#define GET_RING_HEAD(dev_priv) radeon_get_ring_head(dev_priv)
+#define SET_RING_HEAD(dev_priv, val) radeon_set_ring_head(dev_priv, val)
+
+/* Check whether the given hardware address is inside the framebuffer or the
+ * GART area.
+ */
+static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
+ u64 off)
+{
+ u32 fb_start = dev_priv->fb_location;
+ u32 fb_end = fb_start + dev_priv->fb_size - 1;
+ u32 gart_start = dev_priv->gart_vm_start;
+ u32 gart_end = gart_start + dev_priv->gart_size - 1;
+
+ return ((off >= fb_start && off <= fb_end) ||
+ (off >= gart_start && off <= gart_end));
+}
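+
+/*
+ * Typical use (sketch): command-stream verifiers call this before trusting
+ * a GPU address handed in from userspace, e.g.
+ *
+ *   if (!radeon_check_offset(dev_priv, offset))
+ *       return -EINVAL;
+ */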
+
+/* radeon_state.c */
+extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
+
+ /* radeon_cp.c */
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
+extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
+extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
+
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
+
+extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
+
+extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
+
+extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
+extern int radeon_driver_postcleanup(struct drm_device *dev);
+
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern void radeon_mem_takedown(struct mem_block **heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+ struct mem_block *heap);
+
+extern void radeon_enable_bm(struct drm_radeon_private *dev_priv);
+extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off);
+extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val);
+
+ /* radeon_irq.c */
+extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void radeon_do_release(struct drm_device * dev);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device *dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern void radeon_enable_interrupt(struct drm_device *dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
+
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(struct drm_device *dev);
+
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf);
+
+/* r600_cp.c */
+extern int r600_do_engine_reset(struct drm_device *dev);
+extern int r600_do_cleanup_cp(struct drm_device *dev);
+extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+ struct drm_file *file_priv);
+extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_start(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv);
+extern int r600_cp_dispatch_indirect(struct drm_device *dev,
+ struct drm_buf *buf, int start, int end);
+extern int r600_page_table_init(struct drm_device *dev);
+extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
+extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_cp_dispatch_texture(struct drm_device *dev,
+ struct drm_file *file_priv,
+ drm_radeon_texture_t *tex,
+ drm_radeon_tex_image_t *image);
+/* r600_blit.c */
+extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
+extern void r600_done_blit_copy(struct drm_device *dev);
+extern void r600_blit_copy(struct drm_device *dev,
+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+ int size_bytes);
+extern void r600_blit_swap(struct drm_device *dev,
+ uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+ int sx, int sy, int dx, int dy,
+ int w, int h, int src_pitch, int dst_pitch, int cpp);
+
+/* Flags for stats.boxes
+ */
+#define RADEON_BOX_DMA_IDLE 0x1
+#define RADEON_BOX_RING_FULL 0x2
+#define RADEON_BOX_FLIP 0x4
+#define RADEON_BOX_WAIT_IDLE 0x8
+#define RADEON_BOX_TEXTURE_LOAD 0x10
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+#define RADEON_MM_INDEX 0x0000
+#define RADEON_MM_DATA 0x0004
+
+#define RADEON_AGP_COMMAND 0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
+# define RADEON_AGP_ENABLE (1<<8)
+#define RADEON_AUX_SCISSOR_CNTL 0x26f0
+# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
+# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
+# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26)
+# define RADEON_SCISSOR_0_ENABLE (1 << 28)
+# define RADEON_SCISSOR_1_ENABLE (1 << 29)
+# define RADEON_SCISSOR_2_ENABLE (1 << 30)
+
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit. It's handled
+ * by the PCI D-states. PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
+#define RADEON_BUS_CNTL 0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+# define RADEON_BUS_MASTER_DIS (1 << 6)
+/* rs600/rs690/rs740 */
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RADEON_BUS_CNTL1 0x0034
+# define RADEON_PMI_BM_DIS (1 << 2)
+# define RADEON_PMI_INT_DIS (1 << 3)
+
+#define RV370_BUS_CNTL 0x004c
+# define RV370_PMI_BM_DIS (1 << 5)
+# define RV370_PMI_INT_DIS (1 << 6)
+
+#define RADEON_MSI_REARM_EN 0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+# define RV370_MSI_REARM_EN (1 << 0)
+
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+# define RADEON_PLL_WR_EN (1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CONFIG_MEMSIZE 0x00f8
+#define RADEON_CRTC_OFFSET 0x0224
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+# define RADEON_CRTC_TILE_EN (1 << 15)
+# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
+#define RADEON_CRTC2_OFFSET 0x0324
+#define RADEON_CRTC2_OFFSET_CNTL 0x0328
+
+#define RADEON_PCIE_INDEX 0x0030
+#define RADEON_PCIE_DATA 0x0034
+#define RADEON_PCIE_TX_GART_CNTL 0x10
+# define RADEON_PCIE_TX_GART_EN (1 << 0)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
+# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
+# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
+# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
+# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_START_LO 0x14
+#define RADEON_PCIE_TX_GART_START_HI 0x15
+#define RADEON_PCIE_TX_GART_END_LO 0x16
+#define RADEON_PCIE_TX_GART_END_HI 0x17
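+
+/*
+ * The PCIE_TX_* registers above sit behind the RADEON_PCIE_INDEX /
+ * RADEON_PCIE_DATA pair rather than in the direct MMIO space. A minimal
+ * GART enable sequence, sketched assuming a RADEON_WRITE_PCIE(reg, val)
+ * helper that wraps that index/data access:
+ *
+ *   RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, gart_bus_addr);
+ *   RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, aperture_start);
+ *   RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, aperture_end);
+ *   RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+ *                     RADEON_PCIE_TX_GART_EN |
+ *                     RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD);
+ */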
+
+#define RS480_NB_MC_INDEX 0x168
+# define RS480_NB_MC_IND_WR_EN (1 << 8)
+#define RS480_NB_MC_DATA 0x16c
+
+#define RS690_MC_INDEX 0x78
+# define RS690_MC_INDEX_MASK 0x1ff
+# define RS690_MC_INDEX_WR_EN (1 << 9)
+# define RS690_MC_INDEX_WR_ACK 0x7f
+#define RS690_MC_DATA 0x7c
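+
+/*
+ * RS690 MC registers are reached indirectly through the index/data pair
+ * above. Read-side sketch (illustrative name; assumes the RADEON_READ and
+ * RADEON_WRITE MMIO accessors used throughout this driver):
+ */
+static inline u32 rs690_read_mcind(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(RS690_MC_INDEX, addr & RS690_MC_INDEX_MASK);
+ ret = RADEON_READ(RS690_MC_DATA);
+ /* reset the index so the next access starts from a known state */
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+ return ret;
+}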
+
+/* MC indirect registers */
+#define RS480_MC_MISC_CNTL 0x18
+# define RS480_DISABLE_GTW (1 << 1)
+/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
+# define RS480_GART_INDEX_REG_EN (1 << 12)
+# define RS690_BLOCK_GFX_D3_EN (1 << 14)
+#define RS480_K8_FB_LOCATION 0x1e
+#define RS480_GART_FEATURE_ID 0x2b
+# define RS480_HANG_EN (1 << 11)
+# define RS480_TLB_ENABLE (1 << 18)
+# define RS480_P2P_ENABLE (1 << 19)
+# define RS480_GTW_LAC_EN (1 << 25)
+# define RS480_2LEVEL_GART (0 << 30)
+# define RS480_1LEVEL_GART (1 << 30)
+# define RS480_PDC_EN (1 << 31)
+#define RS480_GART_BASE 0x2c
+#define RS480_GART_CACHE_CNTRL 0x2e
+# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
+# define RS480_GART_EN (1 << 0)
+# define RS480_VA_SIZE_32MB (0 << 1)
+# define RS480_VA_SIZE_64MB (1 << 1)
+# define RS480_VA_SIZE_128MB (2 << 1)
+# define RS480_VA_SIZE_256MB (3 << 1)
+# define RS480_VA_SIZE_512MB (4 << 1)
+# define RS480_VA_SIZE_1GB (5 << 1)
+# define RS480_VA_SIZE_2GB (6 << 1)
+#define RS480_AGP_MODE_CNTL 0x39
+# define RS480_POST_GART_Q_SIZE (1 << 18)
+# define RS480_NONGART_SNOOP (1 << 19)
+# define RS480_AGP_RD_BUF_SIZE (1 << 20)
+# define RS480_REQ_TYPE_SNOOP_SHIFT 22
+# define RS480_REQ_TYPE_SNOOP_MASK 0x3
+# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
+#define RS480_MC_MISC_UMA_CNTL 0x5f
+#define RS480_MC_MCLK_CNTL 0x7a
+#define RS480_MC_UMA_DUALCH_CNTL 0x86
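+
+/*
+ * Sketch of the "wait for it to clear" handshake noted at
+ * RS480_GART_CACHE_INVALIDATE above, assuming IGP_WRITE_MCIND /
+ * IGP_READ_MCIND helpers for the RS480 index/data pair:
+ *
+ *   IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+ *   do {
+ *       temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+ *   } while (temp & RS480_GART_CACHE_INVALIDATE);
+ */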
+
+#define RS690_MC_FB_LOCATION 0x100
+#define RS690_MC_AGP_LOCATION 0x101
+#define RS690_MC_AGP_BASE 0x102
+#define RS690_MC_AGP_BASE_2 0x103
+
+#define RS600_MC_INDEX 0x70
+# define RS600_MC_ADDR_MASK 0xffff
+# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
+# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
+# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
+# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
+# define RS600_MC_IND_AIC_RBS (1 << 20)
+# define RS600_MC_IND_CITF_ARB0 (1 << 21)
+# define RS600_MC_IND_CITF_ARB1 (1 << 22)
+# define RS600_MC_IND_WR_EN (1 << 23)
+#define RS600_MC_DATA 0x74
+
+#define RS600_MC_STATUS 0x0
+# define RS600_MC_IDLE (1 << 1)
+#define RS600_MC_FB_LOCATION 0x4
+#define RS600_MC_AGP_LOCATION 0x5
+#define RS600_AGP_BASE 0x6
+#define RS600_AGP_BASE_2 0x7
+#define RS600_MC_CNTL1 0x9
+# define RS600_ENABLE_PAGE_TABLES (1 << 26)
+#define RS600_MC_PT0_CNTL 0x100
+# define RS600_ENABLE_PT (1 << 0)
+# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
+# define RS600_INVALIDATE_L2_CACHE (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
+# define RS600_ENABLE_PAGE_TABLE (1 << 0)
+# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
+# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
+# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
+# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
+# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
+# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
+# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+# define RS600_INVALIDATE_L1_TLB (1 << 20)
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA 0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE 0x03
+#define RV515_MC_AGP_BASE_2 0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE 0x06
+#define R520_MC_AGP_BASE_2 0x07
+
+#define RADEON_MPP_TB_CONFIG 0x01c0
+#define RADEON_MEM_CNTL 0x0140
+#define RADEON_MEM_SDRAM_MODE_REG 0x0158
+#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
+#define RS480_AGP_BASE_2 0x0164
+#define RADEON_AGP_BASE 0x0170
+
+/* pipe config regs */
+#define R400_GB_PIPE_SELECT 0x402c
+#define RV530_GB_PIPE_SELECT2 0x4124
+#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
+#define R300_GB_TILE_CONFIG 0x4018
+# define R300_ENABLE_TILING (1 << 0)
+# define R300_PIPE_COUNT_RV350 (0 << 1)
+# define R300_PIPE_COUNT_R300 (3 << 1)
+# define R300_PIPE_COUNT_R420_3P (6 << 1)
+# define R300_PIPE_COUNT_R420 (7 << 1)
+# define R300_TILE_SIZE_8 (0 << 4)
+# define R300_TILE_SIZE_16 (1 << 4)
+# define R300_TILE_SIZE_32 (2 << 4)
+# define R300_SUBPIXEL_1_12 (0 << 16)
+# define R300_SUBPIXEL_1_16 (1 << 16)
+#define R300_DST_PIPE_CONFIG 0x170c
+# define R300_PIPE_AUTO_CONFIG (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE 0x3428
+# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_RB3D_COLOROFFSET 0x1c40
+#define RADEON_RB3D_COLORPITCH 0x1c48
+
+#define RADEON_SRC_X_Y 0x1590
+
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+# define RADEON_GMC_BRUSH_NONE (15 << 4)
+# define RADEON_GMC_DST_16BPP (4 << 8)
+# define RADEON_GMC_DST_24BPP (5 << 8)
+# define RADEON_GMC_DST_32BPP (6 << 8)
+# define RADEON_GMC_DST_DATATYPE_SHIFT 8
+# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+# define RADEON_GMC_WR_MSK_DIS (1 << 30)
+# define RADEON_ROP3_S 0x00cc0000
+# define RADEON_ROP3_P 0x00f00000
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_SRC_PITCH_OFFSET 0x1428
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+# define RADEON_DST_TILE_LINEAR (0 << 30)
+# define RADEON_DST_TILE_MACRO (1 << 30)
+# define RADEON_DST_TILE_MICRO (2 << 30)
+# define RADEON_DST_TILE_BOTH (3 << 30)
+
+#define RADEON_SCRATCH_REG0 0x15e0
+#define RADEON_SCRATCH_REG1 0x15e4
+#define RADEON_SCRATCH_REG2 0x15e8
+#define RADEON_SCRATCH_REG3 0x15ec
+#define RADEON_SCRATCH_REG4 0x15f0
+#define RADEON_SCRATCH_REG5 0x15f4
+#define RADEON_SCRATCH_UMSK 0x0770
+#define RADEON_SCRATCH_ADDR 0x0774
+
+#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
+
+extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
+
+#define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x)
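+
+/*
+ * Scratch registers can also be mirrored into host memory ("writeback"):
+ * RADEON_SCRATCH_UMSK unmasks the registers to write back and
+ * RADEON_SCRATCH_ADDR points at the backing buffer, so radeon_get_scratch()
+ * can return the memory copy instead of doing a slow MMIO read whenever
+ * dev_priv->writeback_works is set.
+ */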
+
+#define R600_SCRATCH_REG0 0x8500
+#define R600_SCRATCH_REG1 0x8504
+#define R600_SCRATCH_REG2 0x8508
+#define R600_SCRATCH_REG3 0x850c
+#define R600_SCRATCH_REG4 0x8510
+#define R600_SCRATCH_REG5 0x8514
+#define R600_SCRATCH_REG6 0x8518
+#define R600_SCRATCH_REG7 0x851c
+#define R600_SCRATCH_UMSK 0x8540
+#define R600_SCRATCH_ADDR 0x8544
+
+#define R600_SCRATCHOFF(x) (R600_SCRATCH_REG_OFFSET + 4*(x))
+
+#define RADEON_GEN_INT_CNTL 0x0040
+# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
+# define RADEON_SW_INT_ENABLE (1 << 25)
+
+#define RADEON_GEN_INT_STATUS 0x0044
+# define RADEON_CRTC_VBLANK_STAT (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
+# define RADEON_SW_INT_TEST (1 << 25)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
+# define RADEON_SW_INT_FIRE (1 << 26)
+# define R500_DISPLAY_INT_STATUS (1 << 0)
+
+#define RADEON_HOST_PATH_CNTL 0x0130
+# define RADEON_HDP_SOFT_RESET (1 << 26)
+# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28)
+# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28)
+
+#define RADEON_ISYNC_CNTL 0x1724
+# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
+# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
+# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
+# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
+# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
+# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+
+#define RADEON_RBBM_GUICNTL 0x172c
+# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
+# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
+# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
+# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
+
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_MCLK_CNTL 0x0012
+# define RADEON_FORCEON_MCLKA (1 << 16)
+# define RADEON_FORCEON_MCLKB (1 << 17)
+# define RADEON_FORCEON_YCLKA (1 << 18)
+# define RADEON_FORCEON_YCLKB (1 << 19)
+# define RADEON_FORCEON_MC (1 << 20)
+# define RADEON_FORCEON_AIC (1 << 21)
+
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+# define RADEON_SCISSOR_ENABLE (1 << 1)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXOFFSET_0 0x1c5c
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+
+#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */
+#define R300_DSTCACHE_CTLSTAT 0x1714
+# define R300_RB2D_DC_FLUSH (3 << 0)
+# define R300_RB2D_DC_FREE (3 << 2)
+# define R300_RB2D_DC_FLUSH_ALL 0xf
+# define R300_RB2D_DC_BUSY (1 << 31)
+#define RADEON_RB3D_CNTL 0x1c3c
+# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+# define RADEON_PLANE_MASK_ENABLE (1 << 1)
+# define RADEON_DITHER_ENABLE (1 << 2)
+# define RADEON_ROUND_ENABLE (1 << 3)
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+# define RADEON_DITHER_INIT (1 << 5)
+# define RADEON_ROP_ENABLE (1 << 6)
+# define RADEON_STENCIL_ENABLE (1 << 7)
+# define RADEON_Z_ENABLE (1 << 8)
+# define RADEON_ZBLOCK16 (1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_DEPTHCLEARVALUE 0x3230
+#define RADEON_RB3D_DEPTHPITCH 0x1c28
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE 0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
+# define RADEON_RB3D_ZC_FLUSH (1 << 0)
+# define RADEON_RB3D_ZC_FREE (1 << 2)
+# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
+# define RADEON_RB3D_ZC_BUSY (1 << 31)
+#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
+# define R300_ZC_FLUSH (1 << 0)
+# define R300_ZC_FREE (1 << 1)
+# define R300_ZC_BUSY (1 << 31)
+#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
+# define RADEON_RB3D_DC_FLUSH (3 << 0)
+# define RADEON_RB3D_DC_FREE (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL 0xf
+# define RADEON_RB3D_DC_BUSY (1 << 31)
+#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
+# define R300_RB3D_DC_FLUSH (2 << 0)
+# define R300_RB3D_DC_FREE (2 << 2)
+# define R300_RB3D_DC_FINISH (1 << 4)
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_Z_TEST_MASK (7 << 4)
+# define RADEON_Z_TEST_ALWAYS (7 << 4)
+# define RADEON_Z_HIERARCHY_ENABLE (1 << 8)
+# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16)
+# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
+# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
+# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
+# define RADEON_FORCE_Z_DIRTY (1 << 29)
+# define RADEON_Z_WRITE_ENABLE (1 << 30)
+# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31)
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+# define RADEON_SOFT_RESET_CP (1 << 0)
+# define RADEON_SOFT_RESET_HI (1 << 1)
+# define RADEON_SOFT_RESET_SE (1 << 2)
+# define RADEON_SOFT_RESET_RE (1 << 3)
+# define RADEON_SOFT_RESET_PP (1 << 4)
+# define RADEON_SOFT_RESET_E2 (1 << 5)
+# define RADEON_SOFT_RESET_RB (1 << 6)
+# define RADEON_SOFT_RESET_HDP (1 << 7)
+/*
+ * 6:0 Available slots in the FIFO
+ * 8 Host Interface active
+ * 9 CP request active
+ * 10 FIFO request active
+ * 11 Host Interface retry active
+ * 12 CP retry active
+ * 13 FIFO retry active
+ * 14 FIFO pipeline busy
+ * 15 Event engine busy
+ * 16 CP command stream busy
+ * 17 2D engine busy
+ * 18 2D portion of render backend busy
+ * 20 3D setup engine busy
+ * 26 GA engine busy
+ * 27 CBA 2D engine busy
+ * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ * command stream queue not empty or Ring Buffer not empty
+ */
+#define RADEON_RBBM_STATUS 0x0e40
+/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
+/* #define RADEON_RBBM_STATUS 0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
+# define RADEON_RBBM_FIFOCNT_MASK 0x007f
+# define RADEON_HIRQ_ON_RBB (1 << 8)
+# define RADEON_CPRQ_ON_RBB (1 << 9)
+# define RADEON_CFRQ_ON_RBB (1 << 10)
+# define RADEON_HIRQ_IN_RTBUF (1 << 11)
+# define RADEON_CPRQ_IN_RTBUF (1 << 12)
+# define RADEON_CFRQ_IN_RTBUF (1 << 13)
+# define RADEON_PIPE_BUSY (1 << 14)
+# define RADEON_ENG_EV_BUSY (1 << 15)
+# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
+# define RADEON_E2_BUSY (1 << 17)
+# define RADEON_RB2D_BUSY (1 << 18)
+# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
+# define RADEON_VAP_BUSY (1 << 20)
+# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
+# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
+# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
+# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
+# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
+# define RADEON_GA_BUSY (1 << 26)
+# define RADEON_CBA2D_BUSY (1 << 27)
+# define RADEON_RBBM_ACTIVE (1 << 31)
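+
+/*
+ * Idle-polling sketch built on the status bits above (assumes the
+ * RADEON_READ accessor and DRM_UDELAY used elsewhere in this driver):
+ */
+static inline int radeon_rbbm_idle_poll(drm_radeon_private_t *dev_priv)
+{
+ int i;
+ for (i = 0; i < dev_priv->usec_timeout; i++) {
+ u32 status = RADEON_READ(RADEON_RBBM_STATUS);
+ /* all 64 FIFO slots free and no engine block still busy */
+ if ((status & RADEON_RBBM_FIFOCNT_MASK) >= 64 &&
+ !(status & RADEON_RBBM_ACTIVE))
+ return 0;
+ DRM_UDELAY(1);
+ }
+ return -EBUSY;
+}
+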
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+#define RADEON_RE_MISC 0x26c4
+#define RADEON_RE_TOP_LEFT 0x26c0
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+#define RADEON_RE_STIPPLE_ADDR 0x1cc8
+#define RADEON_RE_STIPPLE_DATA 0x1ccc
+
+#define RADEON_SCISSOR_TL_0 0x1cd8
+#define RADEON_SCISSOR_BR_0 0x1cdc
+#define RADEON_SCISSOR_TL_1 0x1ce0
+#define RADEON_SCISSOR_BR_1 0x1ce4
+#define RADEON_SCISSOR_TL_2 0x1ce8
+#define RADEON_SCISSOR_BR_2 0x1cec
+#define RADEON_SE_COORD_FMT 0x1c50
+#define RADEON_SE_CNTL 0x1c4c
+# define RADEON_FFACE_CULL_CW (0 << 0)
+# define RADEON_BFACE_SOLID (3 << 1)
+# define RADEON_FFACE_SOLID (3 << 3)
+# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+# define RADEON_FOG_SHADE_FLAT (1 << 14)
+# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+# define RADEON_ROUND_MODE_TRUNC (0 << 28)
+# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+#define RADEON_SE_CNTL_STATUS 0x2140
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SE_ZBIAS_FACTOR 0x1db0
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
+#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200
+# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16
+# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28
+#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204
+#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208
+# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16
+#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C
+#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR 0x0bfc
+#define RADEON_SURFACE_CNTL 0x0b00
+# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20)
+# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20)
+# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20)
+# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20)
+# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22)
+# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22)
+# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22)
+# define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22)
+#define RADEON_SURFACE0_INFO 0x0b0c
+# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0)
+# define RADEON_SURF_TILE_MODE_MASK (3 << 16)
+# define RADEON_SURF_TILE_MODE_MACRO (0 << 16)
+# define RADEON_SURF_TILE_MODE_MICRO (1 << 16)
+# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16)
+# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0)
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_2D_IDLE (1 << 14)
+# define RADEON_WAIT_3D_IDLE (1 << 15)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
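+
+/*
+ * In the common paths these bits are emitted through a CP type-0 packet
+ * rather than written by direct MMIO. Sketch, assuming the OUT_RING helper
+ * and a CP_PACKET0() header builder as used by this driver's ring code:
+ *
+ *   OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ *   OUT_RING(RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_HOST_IDLECLEAN);
+ */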
+
+#define RADEON_RB3D_ZMASKOFFSET 0x3234
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+# define RADEON_BUF_SWAP_32BIT (2 << 16)
+# define RADEON_RB_NO_UPDATE (1 << 27)
+# define RADEON_RB_RPTR_WR_ENA (1 << 31)
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+# define RADEON_PRE_WRITE_TIMER_SHIFT 0
+# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+
+#define RADEON_CP_IB_BASE 0x0738
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define R300_CP_RESYNC_ADDR 0x0778
+#define R300_CP_RESYNC_DATA 0x077c
+
+#define RADEON_AIC_CNTL 0x01d0
+# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+# define RS400_MSI_REARM (1 << 3)
+#define RADEON_AIC_STAT 0x01d4
+#define RADEON_AIC_PT_BASE 0x01d8
+#define RADEON_AIC_LO_ADDR 0x01dc
+#define RADEON_AIC_HI_ADDR 0x01e0
+#define RADEON_AIC_TLB_ADDR 0x01e4
+#define RADEON_AIC_TLB_DATA 0x01e8
+
+/* CP command packets */
+#define RADEON_CP_PACKET0 0x00000000
+# define RADEON_ONE_REG_WR (1 << 15)
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+# define RADEON_CP_NOP 0x00001000
+# define RADEON_CP_NEXT_CHAR 0x00001900
+# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
+# define RADEON_CP_SET_SCISSORS 0x00001E00
+ /* GEN_INDX_PRIM is unsupported starting with R300 */
+# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
+# define RADEON_WAIT_FOR_IDLE 0x00002600
+# define RADEON_3D_DRAW_VBUF 0x00002800
+# define RADEON_3D_DRAW_IMMD 0x00002900
+# define RADEON_3D_DRAW_INDX 0x00002A00
+# define RADEON_CP_LOAD_PALETTE 0x00002C00
+# define RADEON_3D_LOAD_VBPNTR 0x00002F00
+# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
+# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
+# define RADEON_3D_CLEAR_ZMASK 0x00003200
+# define RADEON_CP_INDX_BUFFER 0x00003300
+# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
+# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
+# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
+# define RADEON_3D_CLEAR_HIZ 0x00003700
+# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
+# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
+# define RADEON_CNTL_PAINT_MULTI 0x00009A00
+# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
+# define RADEON_CNTL_SET_SCISSORS 0xC0001E00
+
+# define R600_IT_INDIRECT_BUFFER_END 0x00001700
+# define R600_IT_SET_PREDICATION 0x00002000
+# define R600_IT_REG_RMW 0x00002100
+# define R600_IT_COND_EXEC 0x00002200
+# define R600_IT_PRED_EXEC 0x00002300
+# define R600_IT_START_3D_CMDBUF 0x00002400
+# define R600_IT_DRAW_INDEX_2 0x00002700
+# define R600_IT_CONTEXT_CONTROL 0x00002800
+# define R600_IT_DRAW_INDEX_IMMD_BE 0x00002900
+# define R600_IT_INDEX_TYPE 0x00002A00
+# define R600_IT_DRAW_INDEX 0x00002B00
+# define R600_IT_DRAW_INDEX_AUTO 0x00002D00
+# define R600_IT_DRAW_INDEX_IMMD 0x00002E00
+# define R600_IT_NUM_INSTANCES 0x00002F00
+# define R600_IT_STRMOUT_BUFFER_UPDATE 0x00003400
+# define R600_IT_INDIRECT_BUFFER_MP 0x00003800
+# define R600_IT_MEM_SEMAPHORE 0x00003900
+# define R600_IT_MPEG_INDEX 0x00003A00
+# define R600_IT_WAIT_REG_MEM 0x00003C00
+# define R600_IT_MEM_WRITE 0x00003D00
+# define R600_IT_INDIRECT_BUFFER 0x00003200
+# define R600_IT_SURFACE_SYNC 0x00004300
+# define R600_CB0_DEST_BASE_ENA (1 << 6)
+# define R600_TC_ACTION_ENA (1 << 23)
+# define R600_VC_ACTION_ENA (1 << 24)
+# define R600_CB_ACTION_ENA (1 << 25)
+# define R600_DB_ACTION_ENA (1 << 26)
+# define R600_SH_ACTION_ENA (1 << 27)
+# define R600_SMX_ACTION_ENA (1 << 28)
+# define R600_IT_ME_INITIALIZE 0x00004400
+# define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+# define R600_IT_COND_WRITE 0x00004500
+# define R600_IT_EVENT_WRITE 0x00004600
+# define R600_IT_EVENT_WRITE_EOP 0x00004700
+# define R600_IT_ONE_REG_WRITE 0x00005700
+# define R600_IT_SET_CONFIG_REG 0x00006800
+# define R600_SET_CONFIG_REG_OFFSET 0x00008000
+# define R600_SET_CONFIG_REG_END 0x0000ac00
+# define R600_IT_SET_CONTEXT_REG 0x00006900
+# define R600_SET_CONTEXT_REG_OFFSET 0x00028000
+# define R600_SET_CONTEXT_REG_END 0x00029000
+# define R600_IT_SET_ALU_CONST 0x00006A00
+# define R600_SET_ALU_CONST_OFFSET 0x00030000
+# define R600_SET_ALU_CONST_END 0x00032000
+# define R600_IT_SET_BOOL_CONST 0x00006B00
+# define R600_SET_BOOL_CONST_OFFSET 0x0003e380
+# define R600_SET_BOOL_CONST_END 0x00040000
+# define R600_IT_SET_LOOP_CONST 0x00006C00
+# define R600_SET_LOOP_CONST_OFFSET 0x0003e200
+# define R600_SET_LOOP_CONST_END 0x0003e380
+# define R600_IT_SET_RESOURCE 0x00006D00
+# define R600_SET_RESOURCE_OFFSET 0x00038000
+# define R600_SET_RESOURCE_END 0x0003c000
+# define R600_SQ_TEX_VTX_INVALID_TEXTURE 0x0
+# define R600_SQ_TEX_VTX_INVALID_BUFFER 0x1
+# define R600_SQ_TEX_VTX_VALID_TEXTURE 0x2
+# define R600_SQ_TEX_VTX_VALID_BUFFER 0x3
+# define R600_IT_SET_SAMPLER 0x00006E00
+# define R600_SET_SAMPLER_OFFSET 0x0003c000
+# define R600_SET_SAMPLER_END 0x0003cff0
+# define R600_IT_SET_CTL_CONST 0x00006F00
+# define R600_SET_CTL_CONST_OFFSET 0x0003cff0
+# define R600_SET_CTL_CONST_END 0x0003e200
+# define R600_IT_SURFACE_BASE_UPDATE 0x00007300
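+
+/*
+ * The *_OFFSET/*_END pairs above bound the register window each SET_*
+ * packet may touch; a command-stream checker validates a packet's start
+ * register against them before letting it through. Sketch (hypothetical
+ * variable names):
+ *
+ *   reg = R600_SET_CONFIG_REG_OFFSET + (ib[idx] << 2);
+ *   if (reg < R600_SET_CONFIG_REG_OFFSET || reg >= R600_SET_CONFIG_REG_END)
+ *       return -EINVAL;
+ */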
+
+#define RADEON_CP_PACKET_MASK 0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
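+
+/*
+ * Putting the header fields together: bits 31:30 select the packet type,
+ * bits 29:16 carry a dword count (payload length minus one), and a type-0
+ * header keeps the target register's dword offset in its low bits. The
+ * usual builders look like this (sketch; the driver defines equivalents):
+ *
+ *   #define CP_PACKET0(reg, n) (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+ *   #define CP_PACKET3(pkt, n) (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
+ */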
+
+#define RADEON_VTX_Z_PRESENT (1 << 31)
+#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3)
+
+#define RADEON_PRIM_TYPE_NONE (0 << 0)
+#define RADEON_PRIM_TYPE_POINT (1 << 0)
+#define RADEON_PRIM_TYPE_LINE (2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+#define RADEON_PRIM_TYPE_MASK 0xf
+#define RADEON_PRIM_WALK_IND (1 << 4)
+#define RADEON_PRIM_WALK_LIST (2 << 4)
+#define RADEON_PRIM_WALK_RING (3 << 4)
+#define RADEON_COLOR_ORDER_BGRA (0 << 6)
+#define RADEON_COLOR_ORDER_RGBA (1 << 6)
+#define RADEON_MAOS_ENABLE (1 << 7)
+#define RADEON_VTX_FMT_R128_MODE (0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE (1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT 16
+
+#define RADEON_COLOR_FORMAT_CI8 2
+#define RADEON_COLOR_FORMAT_ARGB1555 3
+#define RADEON_COLOR_FORMAT_RGB565 4
+#define RADEON_COLOR_FORMAT_ARGB8888 6
+#define RADEON_COLOR_FORMAT_RGB332 7
+#define RADEON_COLOR_FORMAT_RGB8 9
+#define RADEON_COLOR_FORMAT_ARGB4444 15
+
+#define RADEON_TXFORMAT_I8 0
+#define RADEON_TXFORMAT_AI88 1
+#define RADEON_TXFORMAT_RGB332 2
+#define RADEON_TXFORMAT_ARGB1555 3
+#define RADEON_TXFORMAT_RGB565 4
+#define RADEON_TXFORMAT_ARGB4444 5
+#define RADEON_TXFORMAT_ARGB8888 6
+#define RADEON_TXFORMAT_RGBA8888 7
+#define RADEON_TXFORMAT_Y8 8
+#define RADEON_TXFORMAT_VYUY422 10
+#define RADEON_TXFORMAT_YVYU422 11
+#define RADEON_TXFORMAT_DXT1 12
+#define RADEON_TXFORMAT_DXT23 14
+#define RADEON_TXFORMAT_DXT45 15
+
+#define R200_PP_TXCBLEND_0 0x2f00
+#define R200_PP_TXCBLEND_1 0x2f10
+#define R200_PP_TXCBLEND_2 0x2f20
+#define R200_PP_TXCBLEND_3 0x2f30
+#define R200_PP_TXCBLEND_4 0x2f40
+#define R200_PP_TXCBLEND_5 0x2f50
+#define R200_PP_TXCBLEND_6 0x2f60
+#define R200_PP_TXCBLEND_7 0x2f70
+#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268
+#define R200_PP_TFACTOR_0 0x2ee0
+#define R200_SE_VTX_FMT_0 0x2088
+#define R200_SE_VAP_CNTL 0x2080
+#define R200_SE_TCL_MATRIX_SEL_0 0x2230
+#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8
+#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0
+#define R200_PP_TXFILTER_5 0x2ca0
+#define R200_PP_TXFILTER_4 0x2c80
+#define R200_PP_TXFILTER_3 0x2c60
+#define R200_PP_TXFILTER_2 0x2c40
+#define R200_PP_TXFILTER_1 0x2c20
+#define R200_PP_TXFILTER_0 0x2c00
+#define R200_PP_TXOFFSET_5 0x2d78
+#define R200_PP_TXOFFSET_4 0x2d60
+#define R200_PP_TXOFFSET_3 0x2d48
+#define R200_PP_TXOFFSET_2 0x2d30
+#define R200_PP_TXOFFSET_1 0x2d18
+#define R200_PP_TXOFFSET_0 0x2d00
+
+#define R200_PP_CUBIC_FACES_0 0x2c18
+#define R200_PP_CUBIC_FACES_1 0x2c38
+#define R200_PP_CUBIC_FACES_2 0x2c58
+#define R200_PP_CUBIC_FACES_3 0x2c78
+#define R200_PP_CUBIC_FACES_4 0x2c98
+#define R200_PP_CUBIC_FACES_5 0x2cb8
+#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
+#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
+#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
+#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
+#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
+#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
+
+#define R200_RE_AUX_SCISSOR_CNTL 0x26f0
+#define R200_SE_VTE_CNTL 0x20b0
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
+#define R200_PP_TAM_DEBUG3 0x2d9c
+#define R200_PP_CNTL_X 0x2cc4
+#define R200_SE_VAP_CNTL_STATUS 0x2140
+#define R200_RE_SCISSOR_TL_0 0x1cd8
+#define R200_RE_SCISSOR_TL_1 0x1ce0
+#define R200_RE_SCISSOR_TL_2 0x1ce8
+#define R200_RB3D_DEPTHXY_OFFSET 0x1d60
+#define R200_SE_VTX_STATE_CNTL 0x2180
+#define R200_RE_POINTSIZE 0x2648
+#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
+
+#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
+#define RADEON_PP_TEX_SIZE_1 0x1d0c
+#define RADEON_PP_TEX_SIZE_2 0x1d14
+
+#define RADEON_PP_CUBIC_FACES_0 0x1d24
+#define RADEON_PP_CUBIC_FACES_1 0x1d28
+#define RADEON_PP_CUBIC_FACES_2 0x1d2c
+#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
+
+#define RADEON_SE_TCL_STATE_FLUSH 0x2284
+
+#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
+#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
+#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
+#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100
+#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200
+#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001
+#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002
+#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b
+#define R200_3D_DRAW_IMMD_2 0xC0003500
+#define R200_SE_VTX_FMT_1 0x208c
+#define R200_RE_CNTL 0x1c50
+
+#define R200_RB3D_BLENDCOLOR 0x3218
+
+#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4
+
+#define R200_PP_TRI_PERF 0x2cf8
+
+#define R200_PP_AFS_0 0x2f80
+#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */
+
+#define R200_VAP_PVS_CNTL_1 0x22D0
+
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
+#define R500_D1CRTC_STATUS 0x609c
+#define R500_D2CRTC_STATUS 0x689c
+#define R500_CRTC_V_BLANK (1<<0)
+
+#define R500_D1CRTC_FRAME_COUNT 0x60a4
+#define R500_D2CRTC_FRAME_COUNT 0x68a4
+
+#define R500_D1MODE_V_COUNTER 0x6530
+#define R500_D2MODE_V_COUNTER 0x6d30
+
+#define R500_D1MODE_VBLANK_STATUS 0x6534
+#define R500_D2MODE_VBLANK_STATUS 0x6d34
+#define R500_VBLANK_OCCURED (1<<0)
+#define R500_VBLANK_ACK (1<<4)
+#define R500_VBLANK_STAT (1<<12)
+#define R500_VBLANK_INT (1<<16)
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1<<0)
+#define R500_D2MODE_INT_MASK (1<<8)
+
+#define R500_DISP_INTERRUPT_STATUS 0x7edc
+#define R500_D1_VBLANK_INTERRUPT (1 << 4)
+#define R500_D2_VBLANK_INTERRUPT (1 << 5)
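+
+/*
+ * R5xx vblank IRQ handling sketch: read R500_DISP_INTERRUPT_STATUS and, for
+ * each asserted R500_Dx_VBLANK_INTERRUPT bit, acknowledge it by writing
+ * R500_VBLANK_ACK into the matching R500_DxMODE_VBLANK_STATUS register
+ * (assumes the RADEON_READ/RADEON_WRITE accessors):
+ *
+ *   if (RADEON_READ(R500_DISP_INTERRUPT_STATUS) & R500_D1_VBLANK_INTERRUPT)
+ *       RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+ */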
+
+/* R6xx/R7xx registers */
+#define R600_MC_VM_FB_LOCATION 0x2180
+#define R600_MC_VM_AGP_TOP 0x2184
+#define R600_MC_VM_AGP_BOT 0x2188
+#define R600_MC_VM_AGP_BASE 0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
+
+#define R700_MC_VM_FB_LOCATION 0x2024
+#define R700_MC_VM_AGP_TOP 0x2028
+#define R700_MC_VM_AGP_BOT 0x202c
+#define R700_MC_VM_AGP_BASE 0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
+
+#define R600_MCD_RD_A_CNTL 0x219c
+#define R600_MCD_RD_B_CNTL 0x21a0
+
+#define R600_MCD_WR_A_CNTL 0x21a4
+#define R600_MCD_WR_B_CNTL 0x21a8
+
+#define R600_MCD_RD_SYS_CNTL 0x2200
+#define R600_MCD_WR_SYS_CNTL 0x2214
+
+#define R600_MCD_RD_GFX_CNTL 0x21fc
+#define R600_MCD_RD_HDP_CNTL 0x2204
+#define R600_MCD_RD_PDMA_CNTL 0x2208
+#define R600_MCD_RD_SEM_CNTL 0x220c
+#define R600_MCD_WR_GFX_CNTL 0x2210
+#define R600_MCD_WR_HDP_CNTL 0x2218
+#define R600_MCD_WR_PDMA_CNTL 0x221c
+#define R600_MCD_WR_SEM_CNTL 0x2220
+
+# define R600_MCD_L1_TLB (1 << 0)
+# define R600_MCD_L1_FRAG_PROC (1 << 1)
+# define R600_MCD_L1_STRICT_ORDERING (1 << 2)
+
+# define R600_MCD_SYSTEM_ACCESS_MODE_MASK (3 << 6)
+# define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6)
+# define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6)
+# define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS (2 << 6)
+# define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6)
+
+# define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8)
+# define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
+
+# define R600_MCD_SEMAPHORE_MODE (1 << 10)
+# define R600_MCD_WAIT_L2_QUERY (1 << 11)
+# define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 12)
+# define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+
+#define R700_MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define R700_MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define R700_MC_VM_MD_L1_TLB2_CNTL 0x265c
+
+#define R700_MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define R700_MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define R700_MC_VM_MB_L1_TLB2_CNTL 0x223c
+#define R700_MC_VM_MB_L1_TLB3_CNTL 0x2240
+
+# define R700_ENABLE_L1_TLB (1 << 0)
+# define R700_ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+# define R700_SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+# define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+# define R700_EFFECTIVE_L1_TLB_SIZE(x) ((x) << 15)
+# define R700_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 18)
+
+#define R700_MC_ARB_RAMCFG 0x2760
+# define R700_NOOFBANK_SHIFT 0
+# define R700_NOOFBANK_MASK 0x3
+# define R700_NOOFRANK_SHIFT 2
+# define R700_NOOFRANK_MASK 0x1
+# define R700_NOOFROWS_SHIFT 3
+# define R700_NOOFROWS_MASK 0x7
+# define R700_NOOFCOLS_SHIFT 6
+# define R700_NOOFCOLS_MASK 0x3
+# define R700_CHANSIZE_SHIFT 8
+# define R700_CHANSIZE_MASK 0x1
+# define R700_BURSTLENGTH_SHIFT 9
+# define R700_BURSTLENGTH_MASK 0x1
+#define R600_RAMCFG 0x2408
+# define R600_NOOFBANK_SHIFT 0
+# define R600_NOOFBANK_MASK 0x1
+# define R600_NOOFRANK_SHIFT 1
+# define R600_NOOFRANK_MASK 0x1
+# define R600_NOOFROWS_SHIFT 2
+# define R600_NOOFROWS_MASK 0x7
+# define R600_NOOFCOLS_SHIFT 5
+# define R600_NOOFCOLS_MASK 0x3
+# define R600_CHANSIZE_SHIFT 7
+# define R600_CHANSIZE_MASK 0x1
+# define R600_BURSTLENGTH_SHIFT 8
+# define R600_BURSTLENGTH_MASK 0x1
+
+#define R600_VM_L2_CNTL 0x1400
+# define R600_VM_L2_CACHE_EN (1 << 0)
+# define R600_VM_L2_FRAG_PROC (1 << 1)
+# define R600_VM_ENABLE_PTE_CACHE_LRU_W (1 << 9)
+# define R600_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 13)
+# define R700_VM_L2_CNTL_QUEUE_SIZE(x) ((x) << 14)
+
+#define R600_VM_L2_CNTL2 0x1404
+# define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS (1 << 0)
+# define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE (1 << 1)
+#define R600_VM_L2_CNTL3 0x1408
+# define R600_VM_L2_CNTL3_BANK_SELECT_0(x) ((x) << 0)
+# define R600_VM_L2_CNTL3_BANK_SELECT_1(x) ((x) << 5)
+# define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 10)
+# define R700_VM_L2_CNTL3_BANK_SELECT(x) ((x) << 0)
+# define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x) ((x) << 6)
+
+#define R600_VM_L2_STATUS 0x140c
+
+#define R600_VM_CONTEXT0_CNTL 0x1410
+# define R600_VM_ENABLE_CONTEXT (1 << 0)
+# define R600_VM_PAGE_TABLE_DEPTH_FLAT (0 << 1)
+
+#define R600_VM_CONTEXT0_CNTL2 0x1430
+#define R600_VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490
+#define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14b0
+#define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574
+#define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594
+#define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15b4
+
+#define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
+#define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
+#define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157c
+
+#define R600_HDP_HOST_PATH_CNTL 0x2c00
+
+#define R600_GRBM_CNTL 0x8000
+# define R600_GRBM_READ_TIMEOUT(x) ((x) << 0)
+
+#define R600_GRBM_STATUS 0x8010
+# define R600_CMDFIFO_AVAIL_MASK 0x1f
+# define R700_CMDFIFO_AVAIL_MASK 0xf
+# define R600_GUI_ACTIVE (1 << 31)
+#define R600_GRBM_STATUS2 0x8014
+#define R600_GRBM_SOFT_RESET 0x8020
+# define R600_SOFT_RESET_CP (1 << 0)
+#define R600_WAIT_UNTIL 0x8040
+
+#define R600_CP_SEM_WAIT_TIMER 0x85bc
+#define R600_CP_ME_CNTL 0x86d8
+# define R600_CP_ME_HALT (1 << 28)
+#define R600_CP_QUEUE_THRESHOLDS 0x8760
+# define R600_ROQ_IB1_START(x) ((x) << 0)
+# define R600_ROQ_IB2_START(x) ((x) << 8)
+#define R600_CP_MEQ_THRESHOLDS 0x8764
+# define R700_STQ_SPLIT(x) ((x) << 0)
+# define R600_MEQ_END(x) ((x) << 16)
+# define R600_ROQ_END(x) ((x) << 24)
+#define R600_CP_PERFMON_CNTL 0x87fc
+#define R600_CP_RB_BASE 0xc100
+#define R600_CP_RB_CNTL 0xc104
+# define R600_RB_BUFSZ(x) ((x) << 0)
+# define R600_RB_BLKSZ(x) ((x) << 8)
+# define R600_BUF_SWAP_32BIT (2 << 16)
+# define R600_RB_NO_UPDATE (1 << 27)
+# define R600_RB_RPTR_WR_ENA (1 << 31)
+#define R600_CP_RB_RPTR_WR 0xc108
+#define R600_CP_RB_RPTR_ADDR 0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI 0xc110
+#define R600_CP_RB_WPTR 0xc114
+#define R600_CP_RB_WPTR_ADDR 0xc118
+#define R600_CP_RB_WPTR_ADDR_HI 0xc11c
+#define R600_CP_RB_RPTR 0x8700
+#define R600_CP_RB_WPTR_DELAY 0x8704
+#define R600_CP_PFP_UCODE_ADDR 0xc150
+#define R600_CP_PFP_UCODE_DATA 0xc154
+#define R600_CP_ME_RAM_RADDR 0xc158
+#define R600_CP_ME_RAM_WADDR 0xc15c
+#define R600_CP_ME_RAM_DATA 0xc160
+#define R600_CP_DEBUG 0xc1fc
+
+#define R600_PA_CL_ENHANCE 0x8a14
+# define R600_CLIP_VTX_REORDER_ENA (1 << 0)
+# define R600_NUM_CLIP_SEQ(x) ((x) << 1)
+#define R600_PA_SC_LINE_STIPPLE_STATE 0x8b10
+#define R600_PA_SC_MULTI_CHIP_CNTL 0x8b20
+#define R700_PA_SC_FORCE_EOV_MAX_CNTS 0x8b24
+# define R700_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+# define R700_FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define R600_PA_SC_AA_SAMPLE_LOCS_2S 0x8b40
+#define R600_PA_SC_AA_SAMPLE_LOCS_4S 0x8b44
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8b48
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8b4c
+# define R600_S0_X(x) ((x) << 0)
+# define R600_S0_Y(x) ((x) << 4)
+# define R600_S1_X(x) ((x) << 8)
+# define R600_S1_Y(x) ((x) << 12)
+# define R600_S2_X(x) ((x) << 16)
+# define R600_S2_Y(x) ((x) << 20)
+# define R600_S3_X(x) ((x) << 24)
+# define R600_S3_Y(x) ((x) << 28)
+# define R600_S4_X(x) ((x) << 0)
+# define R600_S4_Y(x) ((x) << 4)
+# define R600_S5_X(x) ((x) << 8)
+# define R600_S5_Y(x) ((x) << 12)
+# define R600_S6_X(x) ((x) << 16)
+# define R600_S6_Y(x) ((x) << 20)
+# define R600_S7_X(x) ((x) << 24)
+# define R600_S7_Y(x) ((x) << 28)
+#define R600_PA_SC_FIFO_SIZE 0x8bd0
+# define R600_SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+# define R600_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 8)
+# define R600_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 16)
+#define R700_PA_SC_FIFO_SIZE_R7XX 0x8bcc
+# define R700_SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+# define R700_SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+# define R700_SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define R600_PA_SC_ENHANCE 0x8bf0
+# define R600_FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+# define R600_FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12)
+#define R600_PA_SC_CLIPRECT_RULE 0x2820c
+#define R700_PA_SC_EDGERULE 0x28230
+#define R600_PA_SC_LINE_STIPPLE 0x28a0c
+#define R600_PA_SC_MODE_CNTL 0x28a4c
+#define R600_PA_SC_AA_CONFIG 0x28c04
+
+#define R600_SX_EXPORT_BUFFER_SIZES 0x900c
+# define R600_COLOR_BUFFER_SIZE(x) ((x) << 0)
+# define R600_POSITION_BUFFER_SIZE(x) ((x) << 8)
+# define R600_SMX_BUFFER_SIZE(x) ((x) << 16)
+#define R600_SX_DEBUG_1 0x9054
+# define R600_SMX_EVENT_RELEASE (1 << 0)
+# define R600_ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define R700_SX_DEBUG_1 0x9058
+# define R700_ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define R600_SX_MISC 0x28350
+
+#define R600_DB_DEBUG 0x9830
+# define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
+#define R600_DB_WATERMARKS 0x9838
+# define R600_DEPTH_FREE(x) ((x) << 0)
+# define R600_DEPTH_FLUSH(x) ((x) << 5)
+# define R600_DEPTH_PENDING_FREE(x) ((x) << 15)
+# define R600_DEPTH_CACHELINE_FREE(x) ((x) << 20)
+#define R700_DB_DEBUG3 0x98b0
+# define R700_DB_CLK_OFF_DELAY(x) ((x) << 11)
+#define RV700_DB_DEBUG4 0x9b8c
+# define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6)
+
+#define R600_VGT_CACHE_INVALIDATION 0x88c4
+# define R600_CACHE_INVALIDATION(x) ((x) << 0)
+# define R600_VC_ONLY 0
+# define R600_TC_ONLY 1
+# define R600_VC_AND_TC 2
+# define R700_AUTO_INVLD_EN(x) ((x) << 6)
+# define R700_NO_AUTO 0
+# define R700_ES_AUTO 1
+# define R700_GS_AUTO 2
+# define R700_ES_AND_GS_AUTO 3
+#define R600_VGT_GS_PER_ES 0x88c8
+#define R600_VGT_ES_PER_GS 0x88cc
+#define R600_VGT_GS_PER_VS 0x88e8
+#define R600_VGT_GS_VERTEX_REUSE 0x88d4
+#define R600_VGT_NUM_INSTANCES 0x8974
+#define R600_VGT_STRMOUT_EN 0x28ab0
+#define R600_VGT_EVENT_INITIATOR 0x28a90
+# define R600_CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+#define R600_VGT_VERTEX_REUSE_BLOCK_CNTL 0x28c58
+# define R600_VTX_REUSE_DEPTH_MASK 0xff
+#define R600_VGT_OUT_DEALLOC_CNTL 0x28c5c
+# define R600_DEALLOC_DIST_MASK 0x7f
+
+#define R600_CB_COLOR0_BASE 0x28040
+#define R600_CB_COLOR1_BASE 0x28044
+#define R600_CB_COLOR2_BASE 0x28048
+#define R600_CB_COLOR3_BASE 0x2804c
+#define R600_CB_COLOR4_BASE 0x28050
+#define R600_CB_COLOR5_BASE 0x28054
+#define R600_CB_COLOR6_BASE 0x28058
+#define R600_CB_COLOR7_BASE 0x2805c
+#define R600_CB_COLOR7_FRAG 0x280fc
+
+#define R600_CB_COLOR0_SIZE 0x28060
+#define R600_CB_COLOR0_VIEW 0x28080
+#define R600_CB_COLOR0_INFO 0x280a0
+#define R600_CB_COLOR0_TILE 0x280c0
+#define R600_CB_COLOR0_FRAG 0x280e0
+#define R600_CB_COLOR0_MASK 0x28100
+
+#define AVIVO_D1MODE_VLINE_START_END 0x6538
+#define AVIVO_D2MODE_VLINE_START_END 0x6d38
+#define R600_CP_COHER_BASE 0x85f8
+#define R600_DB_DEPTH_BASE 0x2800c
+#define R600_SQ_PGM_START_FS 0x28894
+#define R600_SQ_PGM_START_ES 0x28880
+#define R600_SQ_PGM_START_VS 0x28858
+#define R600_SQ_PGM_RESOURCES_VS 0x28868
+#define R600_SQ_PGM_CF_OFFSET_VS 0x288d0
+#define R600_SQ_PGM_START_GS 0x2886c
+#define R600_SQ_PGM_START_PS 0x28840
+#define R600_SQ_PGM_RESOURCES_PS 0x28850
+#define R600_SQ_PGM_EXPORTS_PS 0x28854
+#define R600_SQ_PGM_CF_OFFSET_PS 0x288cc
+#define R600_VGT_DMA_BASE 0x287e8
+#define R600_VGT_DMA_BASE_HI 0x287e4
+#define R600_VGT_STRMOUT_BASE_OFFSET_0 0x28b10
+#define R600_VGT_STRMOUT_BASE_OFFSET_1 0x28b14
+#define R600_VGT_STRMOUT_BASE_OFFSET_2 0x28b18
+#define R600_VGT_STRMOUT_BASE_OFFSET_3 0x28b1c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0 0x28b44
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1 0x28b48
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2 0x28b4c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3 0x28b50
+#define R600_VGT_STRMOUT_BUFFER_BASE_0 0x28ad8
+#define R600_VGT_STRMOUT_BUFFER_BASE_1 0x28ae8
+#define R600_VGT_STRMOUT_BUFFER_BASE_2 0x28af8
+#define R600_VGT_STRMOUT_BUFFER_BASE_3 0x28b08
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_0 0x28adc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_1 0x28aec
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_2 0x28afc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_3 0x28b0c
+
+#define R600_VGT_PRIMITIVE_TYPE 0x8958
+
+#define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030
+#define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240
+#define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204
+
+#define R600_TC_CNTL 0x9608
+# define R600_TC_L2_SIZE(x) ((x) << 5)
+# define R600_L2_DISABLE_LATE_HIT (1 << 9)
+
+#define R600_ARB_POP 0x2418
+# define R600_ENABLE_TC128 (1 << 30)
+#define R600_ARB_GDEC_RD_CNTL 0x246c
+
+#define R600_TA_CNTL_AUX 0x9508
+# define R600_DISABLE_CUBE_WRAP (1 << 0)
+# define R600_DISABLE_CUBE_ANISO (1 << 1)
+# define R700_GETLOD_SELECT(x) ((x) << 2)
+# define R600_SYNC_GRADIENT (1 << 24)
+# define R600_SYNC_WALKER (1 << 25)
+# define R600_SYNC_ALIGNER (1 << 26)
+# define R600_BILINEAR_PRECISION_6_BIT (0 << 31)
+# define R600_BILINEAR_PRECISION_8_BIT (1 << 31)
+
+#define R700_TCP_CNTL 0x9610
+
+#define R600_SMX_DC_CTL0 0xa020
+# define R700_USE_HASH_FUNCTION (1 << 0)
+# define R700_CACHE_DEPTH(x) ((x) << 1)
+# define R700_FLUSH_ALL_ON_EVENT (1 << 10)
+# define R700_STALL_ON_EVENT (1 << 11)
+#define R700_SMX_EVENT_CTL 0xa02c
+# define R700_ES_FLUSH_CTL(x) ((x) << 0)
+# define R700_GS_FLUSH_CTL(x) ((x) << 3)
+# define R700_ACK_FLUSH_CTL(x) ((x) << 6)
+# define R700_SYNC_FLUSH_CTL (1 << 8)
+
+#define R600_SQ_CONFIG 0x8c00
+# define R600_VC_ENABLE (1 << 0)
+# define R600_EXPORT_SRC_C (1 << 1)
+# define R600_DX9_CONSTS (1 << 2)
+# define R600_ALU_INST_PREFER_VECTOR (1 << 3)
+# define R600_DX10_CLAMP (1 << 4)
+# define R600_CLAUSE_SEQ_PRIO(x) ((x) << 8)
+# define R600_PS_PRIO(x) ((x) << 24)
+# define R600_VS_PRIO(x) ((x) << 26)
+# define R600_GS_PRIO(x) ((x) << 28)
+# define R600_ES_PRIO(x) ((x) << 30)
+#define R600_SQ_GPR_RESOURCE_MGMT_1 0x8c04
+# define R600_NUM_PS_GPRS(x) ((x) << 0)
+# define R600_NUM_VS_GPRS(x) ((x) << 16)
+# define R700_DYN_GPR_ENABLE (1 << 27)
+# define R600_NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define R600_SQ_GPR_RESOURCE_MGMT_2 0x8c08
+# define R600_NUM_GS_GPRS(x) ((x) << 0)
+# define R600_NUM_ES_GPRS(x) ((x) << 16)
+#define R600_SQ_THREAD_RESOURCE_MGMT 0x8c0c
+# define R600_NUM_PS_THREADS(x) ((x) << 0)
+# define R600_NUM_VS_THREADS(x) ((x) << 8)
+# define R600_NUM_GS_THREADS(x) ((x) << 16)
+# define R600_NUM_ES_THREADS(x) ((x) << 24)
+#define R600_SQ_STACK_RESOURCE_MGMT_1 0x8c10
+# define R600_NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+# define R600_NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define R600_SQ_STACK_RESOURCE_MGMT_2 0x8c14
+# define R600_NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+# define R600_NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define R600_SQ_MS_FIFO_SIZES 0x8cf0
+# define R600_CACHE_FIFO_SIZE(x) ((x) << 0)
+# define R600_FETCH_FIFO_HIWATER(x) ((x) << 8)
+# define R600_DONE_FIFO_HIWATER(x) ((x) << 16)
+# define R600_ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8db0
+# define R700_SIMDA_RING0(x) ((x) << 0)
+# define R700_SIMDA_RING1(x) ((x) << 8)
+# define R700_SIMDB_RING0(x) ((x) << 16)
+# define R700_SIMDB_RING1(x) ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8db4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8db8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8dbc
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8dc0
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8dc4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8dc8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8dcc
+
+#define R600_SPI_PS_IN_CONTROL_0 0x286cc
+# define R600_NUM_INTERP(x) ((x) << 0)
+# define R600_POSITION_ENA (1 << 8)
+# define R600_POSITION_CENTROID (1 << 9)
+# define R600_POSITION_ADDR(x) ((x) << 10)
+# define R600_PARAM_GEN(x) ((x) << 15)
+# define R600_PARAM_GEN_ADDR(x) ((x) << 19)
+# define R600_BARYC_SAMPLE_CNTL(x) ((x) << 26)
+# define R600_PERSP_GRADIENT_ENA (1 << 28)
+# define R600_LINEAR_GRADIENT_ENA (1 << 29)
+# define R600_POSITION_SAMPLE (1 << 30)
+# define R600_BARYC_AT_SAMPLE_ENA (1 << 31)
+#define R600_SPI_PS_IN_CONTROL_1 0x286d0
+# define R600_GEN_INDEX_PIX (1 << 0)
+# define R600_GEN_INDEX_PIX_ADDR(x) ((x) << 1)
+# define R600_FRONT_FACE_ENA (1 << 8)
+# define R600_FRONT_FACE_CHAN(x) ((x) << 9)
+# define R600_FRONT_FACE_ALL_BITS (1 << 11)
+# define R600_FRONT_FACE_ADDR(x) ((x) << 12)
+# define R600_FOG_ADDR(x) ((x) << 17)
+# define R600_FIXED_PT_POSITION_ENA (1 << 24)
+# define R600_FIXED_PT_POSITION_ADDR(x) ((x) << 25)
+# define R700_POSITION_ULC (1 << 30)
+#define R600_SPI_INPUT_Z 0x286d8
+
+#define R600_SPI_CONFIG_CNTL 0x9100
+# define R600_GPR_WRITE_PRIORITY(x) ((x) << 0)
+# define R600_DISABLE_INTERP_1 (1 << 5)
+#define R600_SPI_CONFIG_CNTL_1 0x913c
+# define R600_VTX_DONE_DELAY(x) ((x) << 0)
+# define R600_INTERP_ONE_PRIM_PER_ROW (1 << 4)
+
+#define R600_GB_TILING_CONFIG 0x98f0
+# define R600_PIPE_TILING(x) ((x) << 1)
+# define R600_BANK_TILING(x) ((x) << 4)
+# define R600_GROUP_SIZE(x) ((x) << 6)
+# define R600_ROW_TILING(x) ((x) << 8)
+# define R600_BANK_SWAPS(x) ((x) << 11)
+# define R600_SAMPLE_SPLIT(x) ((x) << 14)
+# define R600_BACKEND_MAP(x) ((x) << 16)
+#define R600_DCP_TILING_CONFIG 0x6ca0
+#define R600_HDP_TILING_CONFIG 0x2f3c
+
+#define R600_CC_RB_BACKEND_DISABLE 0x98f4
+#define R700_CC_SYS_RB_BACKEND_DISABLE 0x3f88
+# define R600_BACKEND_DISABLE(x) ((x) << 16)
+
+#define R600_CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define R600_GC_USER_SHADER_PIPE_CONFIG 0x8954
+# define R600_INACTIVE_QD_PIPES(x) ((x) << 8)
+# define R600_INACTIVE_QD_PIPES_MASK (0xff << 8)
+# define R600_INACTIVE_SIMDS(x) ((x) << 16)
+# define R600_INACTIVE_SIMDS_MASK (0xff << 16)
+
+#define R700_CGTS_SYS_TCC_DISABLE 0x3f90
+#define R700_CGTS_USER_SYS_TCC_DISABLE 0x3f94
+#define R700_CGTS_TCC_DISABLE 0x9148
+#define R700_CGTS_USER_TCC_DISABLE 0x914c
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+
+#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
+#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3
+#define RADEON_LAST_DISPATCH 1
+
+#define R600_LAST_FRAME_REG R600_SCRATCH_REG0
+#define R600_LAST_DISPATCH_REG R600_SCRATCH_REG1
+#define R600_LAST_CLEAR_REG R600_SCRATCH_REG2
+#define R600_LAST_SWI_REG R600_SCRATCH_REG3
+
+#define RADEON_MAX_VB_AGE 0x7fffffff
+#define RADEON_MAX_VB_VERTS (0xffff)
+
+#define RADEON_RING_HIGH_MARK 128
+
+#define RADEON_PCIGART_TABLE_SIZE (32*1024)
+
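+/* MMIO accessors: RADEON_WRITE reaches registers at or above 0x10000
+ * indirectly through the RADEON_MM_INDEX/RADEON_MM_DATA pair, since they
+ * fall outside the directly mapped register range.
+ */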
+#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
+#define RADEON_WRITE(reg, val) \
+do { \
+ if (reg < 0x10000) { \
+ DRM_WRITE32(dev_priv->mmio, (reg), (val)); \
+ } else { \
+ DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg)); \
+ DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val)); \
+ } \
+} while (0)
+#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
+#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
+
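+/* Indexed register blocks (PLL, PCIE, MC) follow a common pattern: program
+ * the index register with the target offset, then write the data register.
+ */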
+#define RADEON_WRITE_PLL(addr, val) \
+do { \
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
+ ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \
+ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
+} while (0)
+
+#define RADEON_WRITE_PCIE(addr, val) \
+do { \
+ RADEON_WRITE8(RADEON_PCIE_INDEX, \
+ ((addr) & 0xff)); \
+ RADEON_WRITE(RADEON_PCIE_DATA, (val)); \
+} while (0)
+
+#define R500_WRITE_MCIND(addr, val) \
+do { \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
+ RADEON_WRITE(R520_MC_IND_DATA, (val)); \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0); \
+} while (0)
+
+#define RS480_WRITE_MCIND(addr, val) \
+do { \
+ RADEON_WRITE(RS480_NB_MC_INDEX, \
+ ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
+ RADEON_WRITE(RS480_NB_MC_DATA, (val)); \
+ RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \
+} while (0)
+
+#define RS690_WRITE_MCIND(addr, val) \
+do { \
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
+ RADEON_WRITE(RS690_MC_DATA, val); \
+ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
+} while (0)
+
+#define RS600_WRITE_MCIND(addr, val) \
+do { \
+ RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK)); \
+ RADEON_WRITE(RS600_MC_DATA, val); \
+} while (0)
+
+#define IGP_WRITE_MCIND(addr, val) \
+do { \
+ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || \
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) \
+ RS690_WRITE_MCIND(addr, val); \
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) \
+ RS600_WRITE_MCIND(addr, val); \
+ else \
+ RS480_WRITE_MCIND(addr, val); \
+} while (0)
+
+#define CP_PACKET0( reg, n ) \
+ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE( reg, n ) \
+ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1( reg0, reg1 ) \
+ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2() \
+ (RADEON_CP_PACKET2)
+#define CP_PACKET3( pkt, n ) \
+ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
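+
+/* The count field in a packet header encodes (n + 1) payload dwords; for
+ * example, a single register write is emitted as
+ *
+ *	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+ *	OUT_RING(RADEON_WAIT_HOST_IDLECLEAN);
+ */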
+
+/* ================================================================
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_IDLE() do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
+ RADEON_WAIT_3D_IDLECLEAN | \
+ RADEON_WAIT_HOST_IDLECLEAN) ); \
+} while (0)
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \
+ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
+ OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \
+} while (0)
+
+#define RADEON_FLUSH_CACHE() do { \
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_DC_FLUSH); \
+ } else { \
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(R300_RB3D_DC_FLUSH); \
+ } \
+} while (0)
+
+#define RADEON_PURGE_CACHE() do { \
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
+ } else { \
+ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
+ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \
+ } \
+} while (0)
+
+#define RADEON_FLUSH_ZCACHE() do { \
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_ZC_FLUSH); \
+ } else { \
+ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(R300_ZC_FLUSH); \
+ } \
+} while (0)
+
+#define RADEON_PURGE_ZCACHE() do { \
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
+ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
+ } else { \
+ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
+ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
+ } \
+} while (0)
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+/* Perfbox functionality only.
+ */
+#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
+do { \
+ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \
+ u32 head = GET_RING_HEAD( dev_priv ); \
+ if (head == dev_priv->ring.tail) \
+ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \
+ } \
+} while (0)
+
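+/* Note: this macro also expects "file_priv" and "dev" to be in scope at the
+ * call site, in addition to its dev_priv argument.
+ */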
+#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
+do { \
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
+ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
+ int __ret; \
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \
+ __ret = r600_do_cp_idle(dev_priv); \
+ else \
+ __ret = radeon_do_cp_idle(dev_priv); \
+ if ( __ret ) return __ret; \
+ sarea_priv->last_dispatch = 0; \
+ radeon_freelist_reset( dev ); \
+ } \
+} while (0)
+
+#define RADEON_DISPATCH_AGE( age ) do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+#define RADEON_FRAME_AGE( age ) do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+#define RADEON_CLEAR_AGE( age ) do { \
+ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \
+ OUT_RING( age ); \
+} while (0)
+
+#define R600_DISPATCH_AGE(age) do { \
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
+ OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
+ OUT_RING(age); \
+} while (0)
+
+#define R600_FRAME_AGE(age) do { \
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
+ OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
+ OUT_RING(age); \
+} while (0)
+
+#define R600_CLEAR_AGE(age) do { \
+ OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); \
+ OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2); \
+ OUT_RING(age); \
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define RADEON_VERBOSE 0
+
+#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
+
+#define RADEON_RING_ALIGN 16
+
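+/* BEGIN_RING reserves space for n dwords; if the ring is too full it flushes
+ * with COMMIT_RING() and waits until there is room for n dwords plus the
+ * padding needed to keep the tail aligned to RADEON_RING_ALIGN.
+ */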
+#define BEGIN_RING( n ) do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
+ } \
+ _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
+ _align_nr += n; \
+ if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
+ COMMIT_RING(); \
+ radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
+ } \
+ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
+ ring = dev_priv->ring.start; \
+ write = dev_priv->ring.tail; \
+ mask = dev_priv->ring.tail_mask; \
+} while (0)
+
+#define ADVANCE_RING() do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
+ write, dev_priv->ring.tail ); \
+ } \
+ if (((dev_priv->ring.tail + _nr) & mask) != write) { \
+ DRM_ERROR( \
+ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
+ ((dev_priv->ring.tail + _nr) & mask), \
+ write, __LINE__); \
+ } else \
+ dev_priv->ring.tail = write; \
+} while (0)
+
+extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
+
+#define COMMIT_RING() do { \
+ radeon_commit_ring(dev_priv); \
+ } while(0)
+
+#define OUT_RING( x ) do { \
+ if ( RADEON_VERBOSE ) { \
+ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
+ (unsigned int)(x), write ); \
+ } \
+ ring[write++] = (x); \
+ write &= mask; \
+} while (0)
+
+#define OUT_RING_REG( reg, val ) do { \
+ OUT_RING( CP_PACKET0( reg, 0 ) ); \
+ OUT_RING( val ); \
+} while (0)
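+
+/* A typical submission sequence built from the macros above (a sketch;
+ * RING_LOCALS declares the local variables these macros rely on):
+ *
+ *	RING_LOCALS;
+ *
+ *	BEGIN_RING(2);
+ *	OUT_RING_REG(RADEON_WAIT_UNTIL, RADEON_WAIT_HOST_IDLECLEAN);
+ *	ADVANCE_RING();
+ *	COMMIT_RING();
+ */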
+
+#define OUT_RING_TABLE( tab, sz ) do { \
+ int _size = (sz); \
+ int *_tab = (int *)(tab); \
+ \
+ if (write + _size > mask) { \
+ int _i = (mask+1) - write; \
+ _size -= _i; \
+ while (_i > 0 ) { \
+ *(int *)(ring + write) = *_tab++; \
+ write++; \
+ _i--; \
+ } \
+ write = 0; \
+ _tab += _i; \
+ } \
+ while (_size > 0) { \
+ *(ring + write) = *_tab++; \
+ write++; \
+ _size--; \
+ } \
+ write &= mask; \
+} while (0)
+
+/**
+ * Copy the given number of dwords from a drm buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do { \
+ int _size = (sz) * 4; \
+ struct drm_buffer *_buf = (buf); \
+ int _part_size; \
+ while (_size > 0) { \
+ _part_size = _size; \
+ \
+ if (write + _part_size/4 > mask) \
+ _part_size = ((mask + 1) - write)*4; \
+ \
+ if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
+ _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+ \
+ memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
+ [drm_buffer_index(_buf)], _part_size); \
+ \
+ _size -= _part_size; \
+ write = (write + _part_size/4) & mask; \
+ drm_buffer_advance(_buf, _part_size); \
+ } \
+} while (0)
+
+
+#endif /* CPTCFG_DRM_RADEON_UMS */
+
+#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 0000000..bd4959c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+extern void
+radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector);
+
+
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+ uint32_t index_mask = 0;
+ int count;
+
+ /* DIG routing gets problematic */
+ if (rdev->family >= CHIP_R600)
+ return index_mask;
+ /* LVDS/TV are too wacky */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return index_mask;
+ /* DVO requires 2x ppll clocks depending on tmds chip */
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ return index_mask;
+
+ count = -1;
+ list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+ count++;
+
+ if (clone_encoder == encoder)
+ continue;
+ if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		index_mask |= (1 << count);
+ }
+ return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->possible_clones = radeon_encoder_clones(encoder);
+ }
+}
+
+uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t ret = 0;
+
+ switch (supported_device) {
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ case ATOM_DEVICE_TV1_SUPPORT:
+ case ATOM_DEVICE_TV2_SUPPORT:
+ case ATOM_DEVICE_CRT2_SUPPORT:
+ case ATOM_DEVICE_CV_SUPPORT:
+ switch (dac) {
+ case 1: /* dac a */
+ if ((rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480))
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
+ else
+ ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
+ break;
+ case 2: /* dac b */
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
+ else {
+ /*if (rdev->family == CHIP_R200)
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ else*/
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+ }
+ break;
+ case 3: /* external dac */
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+ else
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ break;
+ }
+ break;
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+ else
+ ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
+ break;
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ if ((rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480))
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
+ else
+ ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
+ break;
+ case ATOM_DEVICE_LCD2_SUPPORT:
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ if ((rdev->family == CHIP_RS600) ||
+ (rdev->family == CHIP_RS690) ||
+ (rdev->family == CHIP_RS740))
+ ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
+ else if (ASIC_IS_AVIVO(rdev))
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+ else
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ break;
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+ break;
+ }
+
+ return ret;
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* walk the list and link encoders to connectors */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->devices & radeon_connector->devices) {
+ drm_mode_connector_attach_encoder(connector, encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (rdev->is_atom_bios)
+ radeon_atom_backlight_init(radeon_encoder, connector);
+ else
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ rdev->mode_info.bl_encoder = radeon_encoder;
+ }
+ }
+ }
+ }
+}
+
+void radeon_encoder_set_active_device(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
+ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+ radeon_encoder->active_device, radeon_encoder->devices,
+ radeon_connector->devices, encoder->encoder_type);
+ }
+ }
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_encoder->active_device & radeon_connector->devices)
+ return connector;
+ }
+ return NULL;
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_encoder->devices & radeon_connector->devices)
+ return connector;
+ }
+ return NULL;
+}
+
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ if (radeon_encoder->is_ext_encoder)
+ return NULL;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ if (other_encoder == encoder)
+ continue;
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if (other_radeon_encoder->is_ext_encoder &&
+ (radeon_encoder->devices & other_radeon_encoder->devices))
+ return other_encoder;
+ }
+ return NULL;
+}
+
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+{
+ struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
+
+ if (other_encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ return radeon_encoder->encoder_id;
+ default:
+ return ENCODER_OBJECT_ID_NONE;
+ }
+ }
+ return ENCODER_OBJECT_ID_NONE;
+}
+
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
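+	/* Reapply the blanking, sync offsets and sync widths computed above so
+	 * the panel is always driven at its native timing.
+	 */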
+ adjusted_mode->clock = native_mode->clock;
+ adjusted_mode->flags = native_mode->flags;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = native_mode->hdisplay;
+ adjusted_mode->vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->htotal = native_mode->hdisplay + hblank;
+ adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+ adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+ adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+}
+
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+ u32 pixel_clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			/* HDMI 1.3 supports up to 340 MHz over a single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid))
+				return pixel_clock > 340000;
+			else
+				return pixel_clock > 165000;
+		} else
+			return false;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ return false;
+		else {
+			/* HDMI 1.3 supports up to 340 MHz over a single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid))
+				return pixel_clock > 340000;
+			else
+				return pixel_clock > 165000;
+		}
+ default:
+ return false;
+ }
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
new file mode 100644
index 0000000..36e9803
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+
+/* This file defines the CHIP_ and family flags used in the pciids;
+ * it is shared between the kms and non-kms code, because duplicating it
+ * and then updating only one copy is a sure way to introduce bugs.
+ */
+#ifndef RADEON_FAMILY_H
+#define RADEON_FAMILY_H
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+ CHIP_R100 = 0,
+ CHIP_RV100,
+ CHIP_RS100,
+ CHIP_RV200,
+ CHIP_RS200,
+ CHIP_R200,
+ CHIP_RV250,
+ CHIP_RS300,
+ CHIP_RV280,
+ CHIP_R300,
+ CHIP_R350,
+ CHIP_RV350,
+ CHIP_RV380,
+ CHIP_R420,
+ CHIP_R423,
+ CHIP_RV410,
+ CHIP_RS400,
+ CHIP_RS480,
+ CHIP_RS600,
+ CHIP_RS690,
+ CHIP_RS740,
+ CHIP_RV515,
+ CHIP_R520,
+ CHIP_RV530,
+ CHIP_RV560,
+ CHIP_RV570,
+ CHIP_R580,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV670,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RS780,
+ CHIP_RS880,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_SUMO,
+ CHIP_SUMO2,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
+ CHIP_CAYMAN,
+ CHIP_ARUBA,
+ CHIP_TAHITI,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+ RADEON_FAMILY_MASK = 0x0000ffffUL,
+ RADEON_FLAGS_MASK = 0xffff0000UL,
+ RADEON_IS_MOBILITY = 0x00010000UL,
+ RADEON_IS_IGP = 0x00020000UL,
+ RADEON_SINGLE_CRTC = 0x00040000UL,
+ RADEON_IS_AGP = 0x00080000UL,
+ RADEON_HAS_HIERZ = 0x00100000UL,
+ RADEON_IS_PCIE = 0x00200000UL,
+ RADEON_NEW_MEMMAP = 0x00400000UL,
+ RADEON_IS_PCI = 0x00800000UL,
+ RADEON_IS_IGPGART = 0x01000000UL,
+};
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 0000000..b174674
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include <drm/drm_fb_helper.h>
+
+#include <linux/vga_switcheroo.h>
+
+/* object hierarchy -
+ * this contains a helper plus a radeon framebuffer;
+ * the helper contains a pointer to the radeon framebuffer baseclass.
+ */
+struct radeon_fbdev {
+ struct drm_fb_helper helper;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
+};
+
+static struct fb_ops radeonfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+{
+ int aligned = width;
+ int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
+ int pitch_mask = 0;
+
+ switch (bpp / 8) {
+ case 1:
+ pitch_mask = align_large ? 255 : 127;
+ break;
+ case 2:
+ pitch_mask = align_large ? 127 : 31;
+ break;
+ case 3:
+ case 4:
+ pitch_mask = align_large ? 63 : 15;
+ break;
+ }
+
+ aligned += pitch_mask;
+ aligned &= ~pitch_mask;
+ return aligned;
+}
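+
+/* Worked example: with align_large set (AVIVO or tiled) at 32 bpp the pitch
+ * mask is 63, so a 1000-pixel-wide mode is padded to
+ * (1000 + 63) & ~63 = 1024 pixels.
+ */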
+
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
+
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ bool fb_tiled = false; /* useful for testing */
+ u32 tiling_flags = 0;
+ int ret;
+ int aligned_size, size;
+ int height = mode_cmd->height;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ /* need to align pitch with crtc limits */
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+ fb_tiled) * ((bpp + 1) / 8);
+
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitches[0] * height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+ ret = radeon_gem_object_create(rdev, aligned_size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, true,
+ &gobj);
+ if (ret) {
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
+ }
+ rbo = gem_to_radeon_bo(gobj);
+
+ if (fb_tiled)
+ tiling_flags = RADEON_TILING_MACRO;
+
+#ifdef __BIG_ENDIAN
+ switch (bpp) {
+ case 32:
+ tiling_flags |= RADEON_TILING_SWAP_32BIT;
+ break;
+ case 16:
+		tiling_flags |= RADEON_TILING_SWAP_16BIT;
+		break;
+ default:
+ break;
+ }
+#endif
+
+ if (tiling_flags) {
+ ret = radeon_bo_set_tiling_flags(rbo,
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitches[0]);
+ if (ret)
+ dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ }
+
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ /* Only 27 bit offset for legacy CRTC */
+ ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ NULL);
+ if (ret) {
+ radeon_bo_unreserve(rbo);
+ goto out_unref;
+ }
+ if (fb_tiled)
+ radeon_bo_check_tiling(rbo, 0, 0);
+ ret = radeon_bo_kmap(rbo, NULL);
+ radeon_bo_unreserve(rbo);
+ if (ret) {
+ goto out_unref;
+ }
+
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int radeonfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon object %d\n", ret);
+ return ret;
+ }
+
+ rbo = gem_to_radeon_bo(gobj);
+
+	/* okay, we have an object; now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
+ if (info == NULL) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->par = rfbdev;
+
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ if (ret) {
+		DRM_ERROR("failed to initialise framebuffer %d\n", ret);
+ goto out_unref;
+ }
+
+ fb = &rfbdev->rfb.base;
+
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
+
+ strcpy(info->fix.id, "radeondrmfb");
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &radeonfb_ops;
+
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
+ info->fix.smem_start = rdev->mc.aper_base + tmp;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
+
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = rdev->mc.aper_size;
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ if (info->screen_base == NULL) {
+ ret = -ENOSPC;
+ goto out_unref;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+	DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+ return 0;
+
+out_unref:
+ if (fb && ret) {
+ drm_gem_object_unreference(gobj);
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+ }
+ return ret;
+}
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+{
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+}
+
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+{
+ struct fb_info *info;
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
+
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (rfb->obj) {
+ radeonfb_destroy_pinned_object(rfb->obj);
+ rfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeonfb_create,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+ int ret;
+
+	/* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ if (ret) {
+ kfree(rfbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ return 0;
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ return true;
+ return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 0000000..ddb8f8e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * Fences
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed. Whether
+ * we use a scratch register or a memory location depends on
+ * the asic and on whether writeback is enabled.
+ */
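+
+/* A minimal usage sketch (assuming the fence API declared in radeon.h):
+ *
+ *	struct radeon_fence *fence;
+ *	int r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ *	if (!r)
+ *		r = radeon_fence_wait(fence, false);
+ *	radeon_fence_unref(&fence);
+ */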
+
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+{
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ if (drv->cpu_addr) {
+ *drv->cpu_addr = cpu_to_le32(seq);
+ }
+ } else {
+ WREG32(drv->scratch_reg, seq);
+ }
+}
+
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+{
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+ u32 seq = 0;
+
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ if (drv->cpu_addr) {
+ seq = le32_to_cpu(*drv->cpu_addr);
+ } else {
+ seq = lower_32_bits(atomic64_read(&drv->last_seq));
+ }
+ } else {
+ seq = RREG32(drv->scratch_reg);
+ }
+ return seq;
+}
+
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
+{
+ /* we are protected by the ring emission mutex */
+ *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+ if ((*fence) == NULL) {
+ return -ENOMEM;
+ }
+ kref_init(&((*fence)->kref));
+ (*fence)->rdev = rdev;
+ (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+ (*fence)->ring = ring;
+ radeon_fence_ring_emit(rdev, ring, *fence);
+ trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+ return 0;
+}
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+ uint64_t seq, last_seq, last_emitted;
+ unsigned count_loop = 0;
+ bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process would need to be interrupted by another process, and
+	 * that other process would need to update last_seq between this
+	 * process's atomic read and its xchg.
+	 *
+	 * Moreover, for this to become an infinite loop, new fences would
+	 * need to be signaled continuously, i.e. radeon_fence_read would
+	 * need to return a different value each time for both the current
+	 * process and the other process updating last_seq between its
+	 * atomic read and xchg. And the value the other process sets as
+	 * last_seq must be higher than the seq value we just read, which
+	 * means the current process would need to be interrupted after
+	 * radeon_fence_read and before the atomic xchg.
+	 *
+	 * To be even safer, we count the number of times we loop and bail
+	 * out after 10 iterations, accepting the fact that we might have
+	 * temporarily set last_seq not to the true last seq but to an
+	 * older one.
+	 */
+ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
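+	/* Extend the 32-bit value read from the fence location to 64 bits
+	 * using the upper bits of last_seq; if the combined value went
+	 * backwards, the hardware counter wrapped, so take the upper bits
+	 * from last_emitted instead.
+	 */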
+ do {
+ last_emitted = rdev->fence_drv[ring].sync_seq[ring];
+ seq = radeon_fence_read(rdev, ring);
+ seq |= last_seq & 0xffffffff00000000LL;
+ if (seq < last_seq) {
+ seq &= 0xffffffff;
+ seq |= last_emitted & 0xffffffff00000000LL;
+ }
+
+ if (seq <= last_seq || seq > last_emitted) {
+ break;
+ }
+		/* If we loop over, we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+ */
+ wake = true;
+ last_seq = seq;
+ if ((count_loop++) > 10) {
+			/* We looped too many times; bail out, accepting
+			 * that we might have set an older fence seq than
+			 * the true last seq signaled by the hw.
+			 */
+ break;
+ }
+ } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
+ wake_up_all(&rdev->fence_queue);
+ }
+}
+
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
+static void radeon_fence_destroy(struct kref *kref)
+{
+ struct radeon_fence *fence;
+
+ fence = container_of(kref, struct radeon_fence, kref);
+ kfree(fence);
+}
+
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value). Helper function for
+ * radeon_fence_signaled().
+ */
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+ u64 seq, unsigned ring)
+{
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ return true;
+ }
+ /* poll new last sequence at least once */
+ radeon_fence_process(rdev, ring);
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ if (!fence) {
+ return true;
+ }
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
+{
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
+ bool signaled;
+ int r;
+
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
+ }
+
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way we just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save the current last_activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
+
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ }
+ radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and the fence
+ * isn't signaled yet; resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ continue;
+ }
+
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ return -EDEADLK;
+ }
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+ int r;
+
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
+ }
+
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
+ }
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
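+/* A minimal usage sketch, assuming the caller already holds a fence
+ * emitted for some command submission (submit_work() and
+ * handle_lockup() are hypothetical):
+ *
+ * struct radeon_fence *fence = submit_work(rdev, ring);
+ * int r = radeon_fence_wait(fence, true); // interruptible wait
+ * if (r == -EDEADLK)
+ * handle_lockup(rdev); // ring marked not ready, reset needed
+ * radeon_fence_unref(&fence);
+ */
+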
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
+{
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
+ int r;
+
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
+ continue;
+ }
+
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
+ }
+
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
+ */
+ if (i < ring) {
+ ring = i;
+ }
+ }
+
+ /* nothing to wait for? */
+ if (ring == RADEON_NUM_RINGS) {
+ return -ENOENT;
+ }
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way we just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and the fence
+ * isn't signaled yet; resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ return 0;
+}
+
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics). Fence
+ * array is indexed by ring id. @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr)
+{
+ uint64_t seq[RADEON_NUM_RINGS];
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ seq[i] = 0;
+
+ if (!fences[i]) {
+ continue;
+ }
+
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
+ seq[i] = fences[i]->seq;
+ }
+
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+{
+ uint64_t seq;
+
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+ /* nothing to wait for, last_seq is
+ already the last emitted fence */
+ return -ENOENT;
+ }
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+ uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
+
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
+ if (r == -EDEADLK) {
+ return -EDEADLK;
+ }
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ ring, r);
+ }
+ return 0;
+}
+
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+ kref_get(&fence->kref);
+ return fence;
+}
+
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+ struct radeon_fence *tmp = *fence;
+
+ *fence = NULL;
+ if (tmp) {
+ kref_put(&tmp->kref, radeon_fence_destroy);
+ }
+}
+
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring. Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+ uint64_t emitted;
+
+ /* We are not protected by ring lock when reading the last sequence
+ * but it's ok to report slightly wrong fence count here.
+ */
+ radeon_fence_process(rdev, ring);
+ emitted = rdev->fence_drv[ring].sync_seq[ring]
+ - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* to avoid 32-bit wrap-around */
+ if (emitted > 0x10000000) {
+ emitted = 0x10000000;
+ }
+ return (unsigned)emitted;
+}
+
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics). If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *fdrv;
+
+ if (!fence) {
+ return false;
+ }
+
+ if (fence->ring == dst_ring) {
+ return false;
+ }
+
+ /* we are protected by the ring mutex */
+ fdrv = &fence->rdev->fence_drv[dst_ring];
+ if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *dst, *src;
+ unsigned i;
+
+ if (!fence) {
+ return;
+ }
+
+ if (fence->ring == dst_ring) {
+ return;
+ }
+
+ /* we are protected by the ring mutex */
+ src = &fence->rdev->fence_drv[fence->ring];
+ dst = &fence->rdev->fence_drv[dst_ring];
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (i == dst_ring) {
+ continue;
+ }
+ dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+ }
+}
+
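+/* A rough sketch of how the two helpers above pair up when a job on
+ * dst_ring depends on a fence from another ring; the semaphore
+ * emission is simplified and emit_semaphore_wait() is hypothetical:
+ *
+ * if (radeon_fence_need_sync(fence, dst_ring)) {
+ * emit_semaphore_wait(rdev, dst_ring, fence);
+ * radeon_fence_note_sync(fence, dst_ring);
+ * }
+ */
+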
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+{
+ uint64_t index;
+ int r;
+
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
+ rdev->fence_drv[ring].scratch_reg = 0;
+ if (ring != R600_RING_TYPE_UVD_INDEX) {
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+ index;
+
+ } else {
+ /* put fence directly behind firmware */
+ index = ALIGN(rdev->uvd_fw->size, 8);
+ rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+ rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+ }
+
+ } else {
+ r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+ if (r) {
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
+ return r;
+ }
+ index = RADEON_WB_SCRATCH_OFFSET +
+ rdev->fence_drv[ring].scratch_reg -
+ rdev->scratch.reg_base;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+ }
+ radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
+ rdev->fence_drv[ring].initialized = true;
+ dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
+ ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+ return 0;
+}
+
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+ int i;
+
+ rdev->fence_drv[ring].scratch_reg = -1;
+ rdev->fence_drv[ring].cpu_addr = NULL;
+ rdev->fence_drv[ring].gpu_addr = 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ rdev->fence_drv[ring].sync_seq[i] = 0;
+ atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
+ rdev->fence_drv[ring].initialized = false;
+}
+
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+ int ring;
+
+ init_waitqueue_head(&rdev->fence_queue);
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ radeon_fence_driver_init_ring(rdev, ring);
+ }
+ if (radeon_debugfs_fence_init(rdev)) {
+ dev_err(rdev->dev, "fence debugfs file creation failed\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+ int ring, r;
+
+ mutex_lock(&rdev->ring_lock);
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ r = radeon_fence_wait_empty_locked(rdev, ring);
+ if (r) {
+ /* no need to trigger GPU reset as we are unloading */
+ radeon_fence_driver_force_completion(rdev);
+ }
+ wake_up_all(&rdev->fence_queue);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ rdev->fence_drv[ring].initialized = false;
+ }
+ mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure, make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ }
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i, j;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!rdev->fence_drv[i].initialized)
+ continue;
+
+ seq_printf(m, "--- ring %d ---\n", i);
+ seq_printf(m, "Last signaled fence 0x%016llx\n",
+ (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].sync_seq[i]);
+
+ for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+ if (i != j && rdev->fence_drv[j].initialized)
+ seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+ j, rdev->fence_drv[i].sync_seq[j]);
+ }
+ }
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+ {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 0000000..5ce190b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,1287 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+
+/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space. System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective. A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP. AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
+
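+/* As a concrete sketch of the index math used by the bind/unbind
+ * helpers below: a GART offset is first turned into a GPU page index
+ * t and then into a CPU page index p, since one CPU page may back
+ * several GPU pages:
+ *
+ * t = offset / RADEON_GPU_PAGE_SIZE;
+ * p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ *
+ * With 4K CPU pages and 4K GPU pages the two indices coincide; with
+ * 64K CPU pages a single CPU page spans 16 GPU pages.
+ */
+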
+/*
+ * Common GART table functions.
+ */
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+ void *ptr;
+
+ ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
+ &rdev->gart.table_addr);
+ if (ptr == NULL) {
+ return -ENOMEM;
+ }
+#ifdef CONFIG_X86
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ set_memory_uc((unsigned long)ptr,
+ rdev->gart.table_size >> PAGE_SHIFT);
+ }
+#endif
+ rdev->gart.ptr = ptr;
+ memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
+ return 0;
+}
+
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ */
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+ if (rdev->gart.ptr == NULL) {
+ return;
+ }
+#ifdef CONFIG_X86
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ set_memory_wb((unsigned long)rdev->gart.ptr,
+ rdev->gart.table_size >> PAGE_SHIFT);
+ }
+#endif
+ pci_free_consistent(rdev->pdev, rdev->gart.table_size,
+ (void *)rdev->gart.ptr,
+ rdev->gart.table_addr);
+ rdev->gart.ptr = NULL;
+ rdev->gart.table_addr = 0;
+}
+
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.robj == NULL) {
+ r = radeon_bo_create(rdev, rdev->gart.table_size,
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->gart.robj);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+{
+ uint64_t gpu_addr;
+ int r;
+
+ r = radeon_bo_reserve(rdev->gart.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->gart.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->gart.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->gart.robj);
+ radeon_bo_unreserve(rdev->gart.robj);
+ rdev->gart.table_addr = gpu_addr;
+ return r;
+}
+
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->gart.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.robj);
+ radeon_bo_unpin(rdev->gart.robj);
+ radeon_bo_unreserve(rdev->gart.robj);
+ rdev->gart.ptr = NULL;
+ }
+}
+
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+). These asics require the gart table to
+ * be in video memory.
+ */
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+ if (rdev->gart.robj == NULL) {
+ return;
+ }
+ radeon_gart_table_vram_unpin(rdev);
+ radeon_bo_unref(&rdev->gart.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ int pages)
+{
+ unsigned t;
+ unsigned p;
+ int i, j;
+ u64 page_base;
+
+ if (!rdev->gart.ready) {
+ WARN(1, "trying to unbind memory from uninitialized GART !\n");
+ return;
+ }
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ for (i = 0; i < pages; i++, p++) {
+ if (rdev->gart.pages[p]) {
+ rdev->gart.pages[p] = NULL;
+ rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ page_base = rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_base);
+ }
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ int pages, struct page **pagelist, dma_addr_t *dma_addr)
+{
+ unsigned t;
+ unsigned p;
+ uint64_t page_base;
+ int i, j;
+
+ if (!rdev->gart.ready) {
+ WARN(1, "trying to bind memory to uninitialized GART !\n");
+ return -EINVAL;
+ }
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+
+ for (i = 0; i < pages; i++, p++) {
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ rdev->gart.pages[p] = pagelist[i];
+ if (rdev->gart.ptr) {
+ page_base = rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ return 0;
+}
+
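+/* A minimal usage sketch of the pair above (error handling elided;
+ * the dma_addrs array is assumed to come from the caller's DMA
+ * mapping of the pages):
+ *
+ * radeon_gart_bind(rdev, gpu_offset, npages, pages, dma_addrs);
+ * // ... GPU may now access the npages pages at gpu_offset ...
+ * radeon_gart_unbind(rdev, gpu_offset, npages);
+ */
+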
+/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+ int i, j, t;
+ u64 page_base;
+
+ if (!rdev->gart.ptr) {
+ return;
+ }
+ for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+ page_base = rdev->gart.pages_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_init(struct radeon_device *rdev)
+{
+ int r, i;
+
+ if (rdev->gart.pages) {
+ return 0;
+ }
+ /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+ if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
+ DRM_ERROR("Page size is smaller than GPU page size!\n");
+ return -EINVAL;
+ }
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
+ /* Compute table size */
+ rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+ rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
+ DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+ rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+ /* Allocate pages table */
+ rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
+ if (rdev->gart.pages == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
+ rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+ rdev->gart.num_cpu_pages);
+ if (rdev->gart.pages_addr == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
+ /* set GART entry to point to the dummy page by default */
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+ }
+ return 0;
+}
+
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+ if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ /* unbind pages */
+ radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+ }
+ rdev->gart.ready = false;
+ vfree(rdev->gart.pages);
+ vfree(rdev->gart.pages_addr);
+ rdev->gart.pages = NULL;
+ rdev->gart.pages_addr = NULL;
+
+ radeon_dummy_page_fini(rdev);
+}
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time. The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID. When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer. VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
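+/* A sketch of the address split used throughout the VM code below: a
+ * GPU virtual address is broken into a page directory index and a
+ * page table index, with RADEON_VM_BLOCK_SIZE bits worth of PTEs per
+ * page table (RADEON_VM_PTE_COUNT == 1 << RADEON_VM_BLOCK_SIZE):
+ *
+ * pfn = addr / RADEON_GPU_PAGE_SIZE;
+ * pde_idx = pfn >> RADEON_VM_BLOCK_SIZE;
+ * pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);
+ */
+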
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+ return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+ return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm;
+ struct radeon_bo_va *bo_va;
+ int r;
+ unsigned size;
+
+ if (!rdev->vm_manager.enabled) {
+ /* allocate enough for 2 full VM pts */
+ size = radeon_vm_directory_size(rdev);
+ size += rdev->vm_manager.max_pfn * 8;
+ size *= 2;
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ RADEON_GPU_PAGE_ALIGN(size),
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
+
+ r = radeon_asic_vm_init(rdev);
+ if (r)
+ return r;
+
+ rdev->vm_manager.enabled = true;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+ if (r)
+ return r;
+ }
+
+ /* restore page table */
+ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+ if (vm->page_directory == NULL)
+ continue;
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+ }
+ return 0;
+}
+
+/**
+ * radeon_vm_free_pt - free the page table for a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_free_pt(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+ int i;
+
+ if (!vm->page_directory)
+ return;
+
+ list_del_init(&vm->list);
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+
+ if (vm->page_tables == NULL)
+ return;
+
+ for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+ radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+ kfree(vm->page_tables);
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm, *tmp;
+ int i;
+
+ if (!rdev->vm_manager.enabled)
+ return;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ /* free all allocated page tables */
+ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+ mutex_lock(&vm->mutex);
+ radeon_vm_free_pt(rdev, vm);
+ mutex_unlock(&vm->mutex);
+ }
+ for (i = 0; i < RADEON_NUM_VM; ++i) {
+ radeon_fence_unref(&rdev->vm_manager.active[i]);
+ }
+ radeon_asic_vm_fini(rdev);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+
+ if (list_empty(&rdev->vm_manager.lru_vm))
+ return -ENOMEM;
+
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+ struct radeon_vm, list);
+ if (vm_evict == vm)
+ return -ENOMEM;
+
+ mutex_lock(&vm_evict->mutex);
+ radeon_vm_free_pt(rdev, vm_evict);
+ mutex_unlock(&vm_evict->mutex);
+ return 0;
+}
+
+/**
+ * radeon_vm_alloc_pt - allocates a page table for a VM
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Allocate a page table for the requested vm (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ unsigned pd_size, pts_size;
+ u64 *pd_addr;
+ int r;
+
+ if (vm == NULL) {
+ return -EINVAL;
+ }
+
+ if (vm->page_directory != NULL) {
+ return 0;
+ }
+
+retry:
+ pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_directory, pd_size,
+ RADEON_GPU_PAGE_SIZE, false);
+ if (r == -ENOMEM) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
+ return r;
+ goto retry;
+
+ } else if (r) {
+ return r;
+ }
+
+ vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+ /* Initially clear the page directory */
+ pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+ memset(pd_addr, 0, pd_size);
+
+ pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+ vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+ if (vm->page_tables == NULL) {
+ DRM_ERROR("Cannot allocate memory for page table array\n");
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ list_del_init(&vm->list);
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring)
+{
+ struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ unsigned choices[2] = {};
+ unsigned i;
+
+ /* check if the id is still valid */
+ if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+ return NULL;
+
+ /* we definitely need to flush */
+ radeon_fence_unref(&vm->last_flush);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+ struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+ if (fence == NULL) {
+ /* found a free one */
+ vm->id = i;
+ return NULL;
+ }
+
+ if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+ best[fence->ring] = fence;
+ choices[fence->ring == ring ? 0 : 1] = i;
+ }
+ }
+
+ for (i = 0; i < 2; ++i) {
+ if (choices[i]) {
+ vm->id = choices[i];
+ return rdev->vm_manager.active[choices[i]];
+ }
+ }
+
+ /* should never happen */
+ BUG();
+ return NULL;
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence)
+{
+ radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+ rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search @bo's vm list for the requested vm.
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return NULL;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = 0;
+ bo_va->eoffset = 0;
+ bo_va->flags = 0;
+ bo_va->valid = false;
+ bo_va->ref_count = 1;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+
+ mutex_lock(&vm->mutex);
+ list_add(&bo_va->vm_list, &vm->va);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+
+ return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bos virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t soffset,
+ uint32_t flags)
+{
+ uint64_t size = radeon_bo_size(bo_va->bo);
+ uint64_t eoffset, last_offset = 0;
+ struct radeon_vm *vm = bo_va->vm;
+ struct radeon_bo_va *tmp;
+ struct list_head *head;
+ unsigned last_pfn;
+
+ if (soffset) {
+ /* make sure the object fits at this offset */
+ eoffset = soffset + size;
+ if (soffset >= eoffset) {
+ return -EINVAL;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ } else {
+ eoffset = last_pfn = 0;
+ }
+
+ mutex_lock(&vm->mutex);
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va == tmp) {
+ /* skip over currently modified bo */
+ continue;
+ }
+
+ if (soffset >= last_offset && eoffset <= tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+
+ bo_va->soffset = soffset;
+ bo_va->eoffset = eoffset;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ list_move(&bo_va->vm_list, head);
+
+ mutex_unlock(&vm->mutex);
+ return 0;
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+ uint64_t result;
+
+ /* page table offset */
+ result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+ /* in case cpu page size != gpu page size */
+ result |= addr & (~PAGE_MASK);
+
+ return result;
+}
+
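+/* Worked example for the lookup above, assuming PAGE_SHIFT == 12:
+ * for addr = 0x12345678, pages_addr[0x12345] yields the physical
+ * base of the backing system page, and (addr & ~PAGE_MASK) = 0x678
+ * is OR'ed back in as the offset within that page.
+ */
+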
+/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end)
+{
+ static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+ uint64_t last_pde = ~0, last_pt = ~0;
+ unsigned count = 0;
+ uint64_t pt_idx;
+ int r;
+
+ start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+ end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+ /* walk over the address space and update the page directory */
+ for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+ uint64_t pde, pt;
+
+ if (vm->page_tables[pt_idx])
+ continue;
+
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_tables[pt_idx],
+ RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false);
+
+ if (r == -ENOMEM) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
+ return r;
+ goto retry;
+ } else if (r) {
+ return r;
+ }
+
+ pde = vm->pd_gpu_addr + pt_idx * 8;
+
+ pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+ if (((last_pde + 8 * count) != pde) ||
+ ((last_pt + incr * count) != pt)) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pde,
+ last_pt, count, incr,
+ RADEON_VM_PAGE_VALID);
+ }
+
+ count = 1;
+ last_pde = pde;
+ last_pt = pt;
+ } else {
+ ++count;
+ }
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
+ incr, RADEON_VM_PAGE_VALID);
+
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+ uint64_t last_pte = ~0, last_dst = ~0;
+ unsigned count = 0;
+ uint64_t addr;
+
+ start = start / RADEON_GPU_PAGE_SIZE;
+ end = end / RADEON_GPU_PAGE_SIZE;
+
+ /* walk over the address space and update the page tables */
+ for (addr = start; addr < end; ) {
+ uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+ unsigned nptes;
+ uint64_t pte;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+ pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+ pte += (addr & mask) * 8;
+
+ if ((last_pte + 8 * count) != pte) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE,
+ flags);
+ }
+
+ count = nptes;
+ last_pte = pte;
+ last_dst = dst;
+ } else {
+ count += nptes;
+ }
+
+ addr += nptes;
+ dst += nptes * RADEON_GPU_PAGE_SIZE;
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
+
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object have to be reserved & global and local mutex must be locked!
+ */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ unsigned ridx = rdev->asic->vm.pt_ring_index;
+ struct radeon_ib ib;
+ struct radeon_bo_va *bo_va;
+ unsigned nptes, npdes, ndw;
+ uint64_t addr;
+ int r;
+
+ /* nothing to do if vm isn't bound */
+ if (vm->page_directory == NULL)
+ return 0;
+
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (!bo_va->soffset) {
+ dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
+ bo, vm);
+ return -EINVAL;
+ }
+
+ if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+ return 0;
+
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ addr = mem->start << PAGE_SHIFT;
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ } else {
+ addr += rdev->vm_manager.vram_base_offset;
+ }
+ } else {
+ addr = 0;
+ bo_va->valid = false;
+ }
+
+ nptes = radeon_bo_ngpu_pages(bo);
+
+ /* assume two extra pdes in case the mapping overlaps the borders */
+ npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
+
+ /* padding, etc. */
+ ndw = 64;
+
+ if (RADEON_VM_BLOCK_SIZE > 11)
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 4;
+ else
+ /* reserve space for one header for
+ every (1 << BLOCK_SIZE) entries */
+ ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+ /* reserve space for pte addresses */
+ ndw += nptes * 2;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (npdes >> 11) * 4;
+
+ /* reserve space for pde addresses */
+ ndw += npdes * 2;
+
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
+
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
+ ib.length_dw = 0;
+
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
+ addr, bo_va->flags);
+
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
+ radeon_fence_unref(&vm->last_flush);
+
+ return 0;
+}
+
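+/* A worked sizing example for the ndw estimate above, assuming
+ * RADEON_VM_BLOCK_SIZE == 9 (512 PTEs per page table): a 1MB bo has
+ * nptes = 256 and npdes = (256 >> 9) + 2 = 2, so
+ *
+ * ndw = 64 // padding
+ * + (256 >> 9) * 4 // pte headers = 0
+ * + 256 * 2 // pte addresses = 512
+ * + (2 >> 11) * 4 // pde headers = 0
+ * + 2 * 2 // pde addresses = 4
+ * = 580 dwords, well under the 0xfffff IB limit.
+ */
+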
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
+{
+ int r = 0;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&bo_va->vm->mutex);
+ if (bo_va->soffset) {
+ r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+ }
+ mutex_unlock(&rdev->vm_manager.lock);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+
+ kfree(bo_va);
+ return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ vm->id = 0;
+ vm->fence = NULL;
+ mutex_init(&vm->mutex);
+ INIT_LIST_HEAD(&vm->list);
+ INIT_LIST_HEAD(&vm->va);
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&vm->mutex);
+ radeon_vm_free_pt(rdev, vm);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+ radeon_fence_unref(&vm->fence);
+ radeon_fence_unref(&vm->last_flush);
+ mutex_unlock(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 0000000..aa79603
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,627 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+int radeon_gem_object_init(struct drm_gem_object *obj)
+{
+ BUG();
+
+ return 0;
+}
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
+
+ if (robj) {
+ if (robj->gem_base.import_attach)
+ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+ radeon_bo_unref(&robj);
+ }
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj)
+{
+ struct radeon_bo *robj;
+ unsigned long max_size;
+ int r;
+
+ *obj = NULL;
+ /* At least align on page size */
+ if (alignment < PAGE_SIZE) {
+ alignment = PAGE_SIZE;
+ }
+
+ /* maximum bo size is the minimum of visible vram size and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if (size > max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+ __func__, __LINE__, size >> 20, max_size >> 20);
+ return -ENOMEM;
+ }
+
+retry:
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+ initial_domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
+ }
+ return r;
+ }
+ *obj = &robj->gem_base;
+ robj->pid = task_pid_nr(current);
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
+ return 0;
+}
+
+int radeon_gem_set_domain(struct drm_gem_object *gobj,
+ uint32_t rdomain, uint32_t wdomain)
+{
+ struct radeon_bo *robj;
+ uint32_t domain;
+ int r;
+
+ /* FIXME: reimplement */
+ robj = gem_to_radeon_bo(gobj);
+ /* work out where to validate the buffer to */
+ domain = wdomain;
+ if (!domain) {
+ domain = rdomain;
+ }
+ if (!domain) {
+ /* Do nothing */
+ printk(KERN_WARNING "Set domain without domain!\n");
+ return 0;
+ }
+ if (domain == RADEON_GEM_DOMAIN_CPU) {
+ /* Asking for cpu access; wait for the object to be idle */
+ r = radeon_bo_wait(robj, NULL, false);
+ if (r) {
+ printk(KERN_ERR "Failed to wait for object !\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+ INIT_LIST_HEAD(&rdev->gem.objects);
+ return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+ radeon_bo_force_delete(rdev);
+}
+
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and
+ * open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return 0;
+ }
+
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ return r;
+ }
+
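+ /* reuse an existing va mapping if the bo is already in this vm */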
+ bo_va = radeon_vm_bo_find(vm, rbo);
+ if (!bo_va) {
+ bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+ } else {
+ ++bo_va->ref_count;
+ }
+ radeon_bo_unreserve(rbo);
+
+ return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return;
+ }
+
+ r = radeon_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(rdev->dev, "leaking bo va because "
+ "we failed to reserve the bo (%d)\n", r);
+ return;
+ }
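+ /* drop this handle's reference; unmap once the last reference is gone */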
+ bo_va = radeon_vm_bo_find(vm, rbo);
+ if (bo_va) {
+ if (--bo_va->ref_count == 0) {
+ radeon_vm_bo_rmv(rdev, bo_va);
+ }
+ }
+ radeon_bo_unreserve(rbo);
+}
+
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
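+ /* -EDEADLK signals a gpu lockup: reset the gpu and ask userspace to retry */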
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ }
+ return r;
+}
+
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_info *args = data;
+ struct ttm_mem_type_manager *man;
+ unsigned i;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+
+ args->vram_size = rdev->mc.real_vram_size;
+ args->vram_visible = (u64)man->size << PAGE_SHIFT;
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
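+ /* the gtt reserves a page plus the IB pool and space for each ring */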
+ args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+ for(i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->ring[i].ring_size;
+ return 0;
+}
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* TODO: implement */
+ DRM_ERROR("unimplemented %s\n", __func__);
+ return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* TODO: implement */
+ DRM_ERROR("unimplemented %s\n", __func__);
+ return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_create *args = data;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
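+ /* hold the reset lock shared so a gpu reset can't race the allocation */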
+ down_read(&rdev->exclusive_lock);
+ /* create a gem object to back this buffer */
+ args->size = roundup(args->size, PAGE_SIZE);
+ r = radeon_gem_object_create(rdev, args->size, args->alignment,
+ args->initial_domain, false,
+ false, &gobj);
+ if (r) {
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gem_handle_lockup(rdev, r);
+ return r;
+ }
+ r = drm_gem_handle_create(filp, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r) {
+ up_read(&rdev->exclusive_lock);
+ r = radeon_gem_handle_lockup(rdev, r);
+ return r;
+ }
+ args->handle = handle;
+ up_read(&rdev->exclusive_lock);
+ return 0;
+}
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ /* transition the BO to a domain -
+ * just validate the BO into a certain domain */
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_set_domain *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ int r;
+
+ /* for now if someone requests domain CPU -
+ * just make sure the buffer is finished with */
+ down_read(&rdev->exclusive_lock);
+
+ /* just do a BO wait for now */
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ up_read(&rdev->exclusive_lock);
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
+
+ r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+ drm_gem_object_unreference_unlocked(gobj);
+ up_read(&rdev->exclusive_lock);
+ /* robj may be gone once the reference is dropped, use the local rdev */
+ r = radeon_gem_handle_lockup(rdev, r);
+ return r;
+}
+
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
+{
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+
+ gobj = drm_gem_object_lookup(dev, filp, handle);
+ if (gobj == NULL) {
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
+ *offset_p = radeon_bo_mmap_offset(robj);
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_busy *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ int r;
+ uint32_t cur_placement = 0;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
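+ /* non-blocking wait: just sample the busy state and current placement */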
+ r = radeon_bo_wait(robj, &cur_placement, true);
+ switch (cur_placement) {
+ case TTM_PL_VRAM:
+ args->domain = RADEON_GEM_DOMAIN_VRAM;
+ break;
+ case TTM_PL_TT:
+ args->domain = RADEON_GEM_DOMAIN_GTT;
+ break;
+ case TTM_PL_SYSTEM:
+ args->domain = RADEON_GEM_DOMAIN_CPU;
+ break;
+ default:
+ break;
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(rdev, r);
+ return r;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_gem_wait_idle *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ int r;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -ENOENT;
+ }
+ robj = gem_to_radeon_bo(gobj);
+ r = radeon_bo_wait(robj, NULL, false);
+ /* callback hw specific functions if any */
+ if (rdev->asic->ioctl_wait_idle)
+ rdev->asic->ioctl_wait_idle(rdev, robj);
+ drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(rdev, r);
+ return r;
+}
+
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_set_tiling *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ int r = 0;
+
+ DRM_DEBUG("%d \n", args->handle);
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL)
+ return -ENOENT;
+ robj = gem_to_radeon_bo(gobj);
+ r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_get_tiling *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_bo *rbo;
+ int r = 0;
+
+ DRM_DEBUG("\n");
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL)
+ return -ENOENT;
+ rbo = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ goto out;
+ radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+ radeon_bo_unreserve(rbo);
+out:
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_va *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv = filp->driver_priv;
+ struct radeon_bo *rbo;
+ struct radeon_bo_va *bo_va;
+ u32 invalid_flags;
+ int r = 0;
+
+ if (!rdev->vm_manager.enabled) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOTTY;
+ }
+
+ /* !! DON'T REMOVE !!
+ * We don't support vm_id yet. To be sure we don't end up with broken
+ * userspace, reject anyone trying to use a non-zero value; that way
+ * we can start using this field later without breaking existing
+ * userspace.
+ */
+ if (args->vm_id) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ if (args->offset < RADEON_VA_RESERVED_SIZE) {
+ dev_err(&dev->pdev->dev,
+ "offset 0x%lX is in reserved area 0x%X\n",
+ (unsigned long)args->offset,
+ RADEON_VA_RESERVED_SIZE);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ /* don't remove: we need to force userspace to set the snooped flag,
+ * otherwise we will end up with broken userspace and won't be able
+ * to enable this feature without adding a new interface
+ */
+ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+ if ((args->flags & invalid_flags)) {
+ dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ args->flags, invalid_flags);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+ if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+ dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
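+ /* validate the requested operation up front */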
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ case RADEON_VA_UNMAP:
+ break;
+ default:
+ dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ args->operation);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOENT;
+ }
+ rbo = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+ }
+ bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+ if (!bo_va) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return -ENOENT;
+ }
+
+ switch (args->operation) {
+ case RADEON_VA_MAP:
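+ /* a non-zero start offset means the bo is already mapped in this vm */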
+ if (bo_va->soffset) {
+ args->operation = RADEON_VA_RESULT_VA_EXIST;
+ args->offset = bo_va->soffset;
+ goto out;
+ }
+ r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
+ break;
+ case RADEON_VA_UNMAP:
+ r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
+ break;
+ default:
+ break;
+ }
+ args->operation = RADEON_VA_RESULT_OK;
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ }
+out:
+ radeon_bo_unreserve(rbo);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
+ args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, false,
+ &gobj);
+ if (r)
+ return -ENOMEM;
+
+ r = drm_gem_handle_create(file_priv, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r) {
+ return r;
+ }
+ args->handle = handle;
+ return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *rbo;
+ unsigned i = 0;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_for_each_entry(rbo, &rdev->gem.objects, list) {
+ unsigned domain;
+ const char *placement;
+
+ domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ switch (domain) {
+ case RADEON_GEM_DOMAIN_VRAM:
+ placement = "VRAM";
+ break;
+ case RADEON_GEM_DOMAIN_GTT:
+ placement = " GTT";
+ break;
+ case RADEON_GEM_DOMAIN_CPU:
+ default:
+ placement = " CPU";
+ break;
+ }
+ seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+ i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+ placement, (unsigned long)rbo->pid);
+ i++;
+ }
+ mutex_unlock(&rdev->gem.mutex);
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+ {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 0000000..fc60b74
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,1188 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num);
+extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
+
+/**
+ * radeon_ddc_probe - check for a sink on a connector's DDC bus
+ *
+ * Writes a zero offset and reads back the first 8 EDID bytes to verify
+ * that a device responds and that the EDID header looks valid.
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+{
+ u8 out = 0x0;
+ u8 buf[8];
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &out,
+ },
+ {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = 8,
+ .buf = buf,
+ }
+ };
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if (use_aux) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+ } else {
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ }
+
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+ /* Also probe for a valid EDID header.
+ * The EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid, as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
+ }
+ return true;
+}
+
+/* bit banging i2c */
+
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t temp;
+
+ /* RV410 appears to have a bug where the hw i2c in reset
+ * holds the i2c port in a bad state - switch hw i2c away before
+ * doing DDC - do this for all r200s/r300s/r400s for safety's sake
+ */
+ if (rec->hw_capable) {
+ if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+ u32 reg;
+
+ if (rdev->family >= CHIP_RV350)
+ reg = RADEON_GPIO_MONID;
+ else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350))
+ reg = RADEON_GPIO_DVI_DDC;
+ else
+ reg = RADEON_GPIO_CRT2_DDC;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ if (rec->a_clk_reg == reg) {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+ } else {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ }
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+ }
+ }
+
+ /* switch the pads to ddc mode */
+ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+ temp = RREG32(rec->mask_clk_reg);
+ temp &= ~(1 << 16);
+ WREG32(rec->mask_clk_reg, temp);
+ }
+
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, temp);
+
+ /* set the pins to input */
+ temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, temp);
+
+ temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, temp);
+
+ /* mask the gpio pins for software use */
+ temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, temp);
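+ /* read back to flush the posted write */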
+ temp = RREG32(rec->mask_clk_reg);
+
+ temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, temp);
+ temp = RREG32(rec->mask_data_reg);
+
+ return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t temp;
+
+ /* unmask the gpio pins for software use */
+ temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, temp);
+ temp = RREG32(rec->mask_clk_reg);
+
+ temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, temp);
+ temp = RREG32(rec->mask_data_reg);
+}
+
+static int get_clock(void *i2c_priv)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ /* read the value off the pin */
+ val = RREG32(rec->y_clk_reg);
+ val &= rec->y_clk_mask;
+
+ return (val != 0);
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ /* read the value off the pin */
+ val = RREG32(rec->y_data_reg);
+ val &= rec->y_data_mask;
+
+ return (val != 0);
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ /* open drain: enable the output to pull the clock low, tri-state it to float high */
+ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ val |= clock ? 0 : rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, val);
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct radeon_i2c_chan *i2c = i2c_priv;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ uint32_t val;
+
+ /* open drain: enable the output to pull the data line low, tri-state it to float high */
+ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ val |= data ? 0 : rec->en_data_mask;
+ WREG32(rec->en_data_reg, val);
+}
+
+/* hw i2c */
+
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+ u32 sclk = rdev->pm.current_sclk;
+ u32 prescale = 0;
+ u32 nm;
+ u8 n, m, loop;
+ int i2c_clock;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ i2c_clock = 60;
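+ /* derive n/m clock dividers for the target i2c clock from the current sclk */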
+ nm = (sclk * 10) / (i2c_clock * 4);
+ for (loop = 1; loop < 255; loop++) {
+ if ((nm / loop) < loop)
+ break;
+ }
+ n = loop - 1;
+ m = loop - 2;
+ prescale = m | (n << 8);
+ break;
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* todo */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ i2c_clock = 50;
+ if (rdev->family == CHIP_R520)
+ prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+ else
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* todo */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* todo */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* todo */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ break;
+ }
+ return prescale;
+}
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, k, ret = num;
+ u32 prescale;
+ u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+ u32 tmp, reg;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+ RADEON_I2C_DRIVE_EN |
+ RADEON_I2C_START |
+ RADEON_I2C_STOP |
+ RADEON_I2C_GO);
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ }
+
+ if (rec->mm_i2c) {
+ i2c_cntl_0 = RADEON_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_I2C_CNTL_1;
+ i2c_data = RADEON_I2C_DATA;
+ } else {
+ i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+ i2c_data = RADEON_DVI_I2C_DATA;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ /* no gpio select bit */
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R200:
+ /* only bit 4 on r200 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_CRT2_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ /* only bit 4 on r300/r350 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ default:
+ DRM_ERROR("unsupported asic\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, 0);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
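+ /* poll the engine until GO clears and DONE is set */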
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ for (j = 0; j < p->len; j++) {
+ if (p->flags & I2C_M_RD) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ p->buf[j] = RREG32(i2c_data) & 0xff;
+ } else {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, p->buf[j]);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+
+done:
+ WREG32(i2c_cntl_0, 0);
+ WREG32(i2c_cntl_1, 0);
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, remaining, current_count, buffer_offset, ret = num;
+ u32 prescale;
+ u32 tmp, reg;
+ u32 saved1, saved2;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ /* clear gpio mask bits */
+ tmp = RREG32(rec->mask_clk_reg);
+ tmp &= ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, tmp);
+ tmp = RREG32(rec->mask_clk_reg);
+
+ tmp = RREG32(rec->mask_data_reg);
+ tmp &= ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, tmp);
+ tmp = RREG32(rec->mask_data_reg);
+
+ /* clear pin values */
+ tmp = RREG32(rec->a_clk_reg);
+ tmp &= ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, tmp);
+ tmp = RREG32(rec->a_clk_reg);
+
+ tmp = RREG32(rec->a_data_reg);
+ tmp &= ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, tmp);
+ tmp = RREG32(rec->a_data_reg);
+
+ /* set the pins to input */
+ tmp = RREG32(rec->en_clk_reg);
+ tmp &= ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, tmp);
+ tmp = RREG32(rec->en_clk_reg);
+
+ tmp = RREG32(rec->en_data_reg);
+ tmp &= ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, tmp);
+ tmp = RREG32(rec->en_data_reg);
+
+ /* flag the hw i2c engine as busy and save the controller state */
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+ saved2 = RREG32(0x494);
+ WREG32(0x494, saved2 | 0x1);
+
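+ /* ask the hw arbiter for the bus and wait until software is granted access */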
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ for (i = 0; i < 50; i++) {
+ udelay(1);
+ if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+ break;
+ }
+ if (i == 50) {
+ DRM_ERROR("failed to get i2c bus\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+ switch (rec->mask_clk_reg) {
+ case AVIVO_DC_GPIO_DDC1_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+ break;
+ case AVIVO_DC_GPIO_DDC2_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+ break;
+ case AVIVO_DC_GPIO_DDC3_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ WREG32(AVIVO_DC_I2C_DATA, 0);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(1) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ if (p->flags & I2C_M_RD) {
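+ /* the engine buffers at most 15 bytes, so split the transfer into chunks */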
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ for (j = 0; j < current_count; j++)
+ p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ } else {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ for (j = 0; j < current_count; j++)
+ WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+ }
+
+done:
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+ WREG32(0x494, saved2);
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ int ret = 0;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ if (rec->mm_i2c)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* XXX fill in hw i2c implementation */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* XXX fill in hw i2c implementation */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+ .master_xfer = radeon_hw_i2c_xfer,
+ .functionality = radeon_hw_i2c_func,
+};
+
+static const struct i2c_algorithm radeon_atom_i2c_algo = {
+ .master_xfer = radeon_atom_hw_i2c_xfer,
+ .functionality = radeon_atom_hw_i2c_func,
+};
+
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ /* don't add the mm_i2c bus unless hw_i2c is enabled */
+ if (rec->mm_i2c && (radeon_hw_i2c == 0))
+ return NULL;
+
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->rec = *rec;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
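+ /* use the hw engine where one is implemented (and radeon_hw_i2c is set),
+ * otherwise fall back to bit-banging */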
+ if (rec->mm_i2c ||
+ (rec->hw_capable &&
+ radeon_hw_i2c &&
+ ((rdev->family <= CHIP_RS480) ||
+ ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+ /* set the radeon hw i2c adapter */
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c hw bus %s", name);
+ i2c->adapter.algo = &radeon_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register hw i2c %s\n", name);
+ goto out_free;
+ }
+ } else if (rec->hw_capable &&
+ radeon_hw_i2c &&
+ ASIC_IS_DCE3(rdev)) {
+ /* hw i2c using atom */
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c hw bus %s", name);
+ i2c->adapter.algo = &radeon_atom_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register hw i2c %s\n", name);
+ goto out_free;
+ }
+ } else {
+ /* set the radeon bit adapter */
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c bit bus %s", name);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.pre_xfer = pre_xfer;
+ i2c->algo.bit.post_xfer = post_xfer;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 10;
+ i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c %s\n", name);
+ goto out_free;
+ }
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->rec = *rec;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon aux bus %s", name);
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+ i2c->algo.dp.address = 0;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_INFO("Failed to register i2c %s\n", name);
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+ if (rdev->is_atom_bios)
+ radeon_atombios_i2c_init(rdev);
+ else
+ radeon_combios_i2c_init(rdev);
+}
+
+/* remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i]) {
+ radeon_i2c_destroy(rdev->i2c_bus[i]);
+ rdev->i2c_bus[i] = NULL;
+ }
+ }
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct drm_device *dev = rdev->ddev;
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (!rdev->i2c_bus[i]) {
+ rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+ return;
+ }
+ }
+}
+
+/* looks up bus based on id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i] &&
+ (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+ return rdev->i2c_bus[i];
+ }
+ }
+ return NULL;
+}
+
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+ return NULL;
+}
+
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
+{
+ u8 out_buf[2];
+ u8 in_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+ } else {
+ DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
+ addr, *val);
+ }
+}
+
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
+{
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
+ addr, val);
+}
+
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.ddc_valid)
+ return;
+
+ if (!radeon_connector->router_bus)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
+ val |= radeon_connector->router.ddc_mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.cd_valid)
+ return;
+
+ if (!radeon_connector->router_bus)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ val |= radeon_connector->router.cd_mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
new file mode 100644
index 0000000..c180df8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -0,0 +1,424 @@
+/**
+ * \file radeon_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Radeon DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
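+/* Each 32-bit struct below mirrors its native counterpart with user
+ * pointers narrowed to u32; each compat handler widens the fields back
+ * and forwards the request to the native ioctl.
+ */
+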
+typedef struct drm_radeon_init32 {
+ int func;
+ u32 sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ u32 fb_offset;
+ u32 mmio_offset;
+ u32 ring_offset;
+ u32 ring_rptr_offset;
+ u32 buffers_offset;
+ u32 gart_textures_offset;
+} drm_radeon_init32_t;
+
+static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_init32_t init32;
+ drm_radeon_init_t __user *init;
+
+ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+ return -EFAULT;
+
+ init = compat_alloc_user_space(sizeof(*init));
+ if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+ || __put_user(init32.func, &init->func)
+ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+ || __put_user(init32.is_pci, &init->is_pci)
+ || __put_user(init32.cp_mode, &init->cp_mode)
+ || __put_user(init32.gart_size, &init->gart_size)
+ || __put_user(init32.ring_size, &init->ring_size)
+ || __put_user(init32.usec_timeout, &init->usec_timeout)
+ || __put_user(init32.fb_bpp, &init->fb_bpp)
+ || __put_user(init32.front_offset, &init->front_offset)
+ || __put_user(init32.front_pitch, &init->front_pitch)
+ || __put_user(init32.back_offset, &init->back_offset)
+ || __put_user(init32.back_pitch, &init->back_pitch)
+ || __put_user(init32.depth_bpp, &init->depth_bpp)
+ || __put_user(init32.depth_offset, &init->depth_offset)
+ || __put_user(init32.depth_pitch, &init->depth_pitch)
+ || __put_user(init32.fb_offset, &init->fb_offset)
+ || __put_user(init32.mmio_offset, &init->mmio_offset)
+ || __put_user(init32.ring_offset, &init->ring_offset)
+ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+ || __put_user(init32.buffers_offset, &init->buffers_offset)
+ || __put_user(init32.gart_textures_offset,
+ &init->gart_textures_offset))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+}
+
+typedef struct drm_radeon_clear32 {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ u32 depth_boxes;
+} drm_radeon_clear32_t;
+
+static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_clear32_t clr32;
+ drm_radeon_clear_t __user *clr;
+
+ if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
+ return -EFAULT;
+
+ clr = compat_alloc_user_space(sizeof(*clr));
+ if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
+ || __put_user(clr32.flags, &clr->flags)
+ || __put_user(clr32.clear_color, &clr->clear_color)
+ || __put_user(clr32.clear_depth, &clr->clear_depth)
+ || __put_user(clr32.color_mask, &clr->color_mask)
+ || __put_user(clr32.depth_mask, &clr->depth_mask)
+ || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
+ &clr->depth_boxes))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+}
+
+typedef struct drm_radeon_stipple32 {
+ u32 mask;
+} drm_radeon_stipple32_t;
+
+static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_stipple32_t __user *argp = (void __user *)arg;
+ drm_radeon_stipple_t __user *request;
+ u32 mask;
+
+ if (get_user(mask, &argp->mask))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user((unsigned int __user *)(unsigned long)mask,
+ &request->mask))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_tex_image32 {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ u32 data;
+} drm_radeon_tex_image32_t;
+
+typedef struct drm_radeon_texture32 {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ u32 image;
+} drm_radeon_texture32_t;
+
+static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_texture32_t req32;
+ drm_radeon_texture_t __user *request;
+ drm_radeon_tex_image32_t img32;
+ drm_radeon_tex_image_t __user *image;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+ if (req32.image == 0)
+ return -EINVAL;
+ if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
+ sizeof(img32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
+ if (!access_ok(VERIFY_WRITE, request,
+ sizeof(*request) + sizeof(*image)))
+ return -EFAULT;
+ image = (drm_radeon_tex_image_t __user *) (request + 1);
+
+ if (__put_user(req32.offset, &request->offset)
+ || __put_user(req32.pitch, &request->pitch)
+ || __put_user(req32.format, &request->format)
+ || __put_user(req32.width, &request->width)
+ || __put_user(req32.height, &request->height)
+ || __put_user(image, &request->image)
+ || __put_user(img32.x, &image->x)
+ || __put_user(img32.y, &image->y)
+ || __put_user(img32.width, &image->width)
+ || __put_user(img32.height, &image->height)
+ || __put_user((const void __user *)(unsigned long)img32.data,
+ &image->data))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_vertex2_32 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ u32 state;
+ int nr_prims;
+ u32 prim;
+} drm_radeon_vertex2_32_t;
+
+static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_vertex2_32_t req32;
+ drm_radeon_vertex2_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.idx, &request->idx)
+ || __put_user(req32.discard, &request->discard)
+ || __put_user(req32.nr_states, &request->nr_states)
+ || __put_user((void __user *)(unsigned long)req32.state,
+ &request->state)
+ || __put_user(req32.nr_prims, &request->nr_prims)
+ || __put_user((void __user *)(unsigned long)req32.prim,
+ &request->prim))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+}
+
+typedef struct drm_radeon_cmd_buffer32 {
+ int bufsz;
+ u32 buf;
+ int nbox;
+ u32 boxes;
+} drm_radeon_cmd_buffer32_t;
+
+static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_cmd_buffer32_t req32;
+ drm_radeon_cmd_buffer_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.bufsz, &request->bufsz)
+ || __put_user((void __user *)(unsigned long)req32.buf,
+ &request->buf)
+ || __put_user(req32.nbox, &request->nbox)
+ || __put_user((void __user *)(unsigned long)req32.boxes,
+ &request->boxes))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+}
+
+typedef struct drm_radeon_getparam32 {
+ int param;
+ u32 value;
+} drm_radeon_getparam32_t;
+
+static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_getparam32_t req32;
+ drm_radeon_getparam_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+ || __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+}
+
+typedef struct drm_radeon_mem_alloc32 {
+ int region;
+ int alignment;
+ int size;
+ u32 region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc32_t;
+
+static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_mem_alloc32_t req32;
+ drm_radeon_mem_alloc_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.region, &request->region)
+ || __put_user(req32.alignment, &request->alignment)
+ || __put_user(req32.size, &request->size)
+ || __put_user((int __user *)(unsigned long)req32.region_offset,
+ &request->region_offset))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+}
+
+typedef struct drm_radeon_irq_emit32 {
+ u32 irq_seq;
+} drm_radeon_irq_emit32_t;
+
+static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_irq_emit32_t req32;
+ drm_radeon_irq_emit_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user((int __user *)(unsigned long)req32.irq_seq,
+ &request->irq_seq))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+}
+
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
+typedef struct drm_radeon_setparam32 {
+ int param;
+ u64 value;
+} __attribute__((packed)) drm_radeon_setparam32_t;
+
+static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_setparam32_t req32;
+ drm_radeon_setparam_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+ || __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+ return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+}
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
+
+static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+ [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+ [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+ [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+ [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
+ [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
+ [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
+ [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
+ [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
+ [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
+ [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+ fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+ if (fn != NULL)
+ ret = (*fn) (filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+}
+
+long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
new file mode 100644
index 0000000..8d68e97
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -0,0 +1,402 @@
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (state)
+ dev_priv->irq_enable_reg |= mask;
+ else
+ dev_priv->irq_enable_reg &= ~mask;
+
+ if (dev->irq_enabled)
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (state)
+ dev_priv->r500_disp_irq_reg |= mask;
+ else
+ dev_priv->r500_disp_irq_reg &= ~mask;
+
+ if (dev->irq_enabled)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+ switch (crtc) {
+ case 0:
+ r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
+ break;
+ case 1:
+ r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ return -EINVAL;
+ }
+ } else {
+ switch (crtc) {
+ case 0:
+ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+ break;
+ case 1:
+ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+ switch (crtc) {
+ case 0:
+ r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
+ break;
+ case 1:
+ r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ break;
+ }
+ } else {
+ switch (crtc) {
+ case 0:
+ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+ break;
+ case 1:
+ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ break;
+ }
+ }
+}
+
+static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+{
+ u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
+ u32 irq_mask = RADEON_SW_INT_TEST;
+
+ *r500_disp_int = 0;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+ /* vbl interrupts in a different place */
+
+ if (irqs & R500_DISPLAY_INT_STATUS) {
+ /* if a display interrupt */
+ u32 disp_irq;
+
+ disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
+
+ *r500_disp_int = disp_irq;
+ if (disp_irq & R500_D1_VBLANK_INTERRUPT)
+ RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+ if (disp_irq & R500_D2_VBLANK_INTERRUPT)
+ RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+ }
+ irq_mask |= R500_DISPLAY_INT_STATUS;
+ } else
+ irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
+
+ irqs &= irq_mask;
+
+ if (irqs)
+ RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
+ return irqs;
+}
+
+/* Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ *    - Drop marker IRQs into the command stream ahead of time.
+ *    - Wait on IRQs with the lock *not held*.
+ *    - Check each for its termination condition.
+ *
+ * - Internally in cp_getbuffer, etc.:
+ *    - As above, but wait with the lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the IRQs aren't
+ * tied to DMA at all; this is just a hangover from DRI prehistory.
+ */
+
+irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+ u32 stat;
+ u32 r500_disp_int;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return IRQ_NONE;
+
+ /* Only consider the bits we're interested in - others could be used
+ * outside the DRM
+ */
+ stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
+ if (!stat)
+ return IRQ_NONE;
+
+ stat &= dev_priv->irq_enable_reg;
+
+ /* SW interrupt */
+ if (stat & RADEON_SW_INT_TEST)
+ DRM_WAKEUP(&dev_priv->swi_queue);
+
+ /* VBLANK interrupt */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+ if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
+ drm_handle_vblank(dev, 0);
+ if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
+ drm_handle_vblank(dev, 1);
+ } else {
+ if (stat & RADEON_CRTC_VBLANK_STAT)
+ drm_handle_vblank(dev, 0);
+ if (stat & RADEON_CRTC2_VBLANK_STAT)
+ drm_handle_vblank(dev, 1);
+ }
+ return IRQ_HANDLED;
+}
+
+static int radeon_emit_irq(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ unsigned int ret;
+ RING_LOCALS;
+
+ atomic_inc(&dev_priv->swi_emitted);
+ ret = atomic_read(&dev_priv->swi_emitted);
+
+ BEGIN_RING(4);
+ OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return ret;
+}
+
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+ int ret = 0;
+
+ if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
+ return 0;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+ DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+ RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+
+ return ret;
+}
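+
+/*
+ * Editor's sketch (illustrative, not part of this patch): together these
+ * two helpers implement the marker pattern described above -- emit a
+ * monotonically increasing sequence number through the ring, then sleep
+ * until the GPU has written it back:
+ *
+ *	int seq = radeon_emit_irq(dev);	// writes seq to LAST_SWI, fires SW irq
+ *	...queue more work...
+ *	radeon_wait_irq(dev, seq);	// sleeps until LAST_SWI >= seq
+ */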
+
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if (crtc < 0 || crtc > 1) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+ if (crtc == 0)
+ return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+ else
+ return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
+ } else {
+ if (crtc == 0)
+ return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+ else
+ return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
+ }
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_emit_t *emit = data;
+ int result;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ result = radeon_emit_irq(dev);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_wait_t *irqwait = data;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ return radeon_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* drm_dma.h hooks
+*/
+void radeon_driver_irq_preinstall(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+ u32 dummy;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return;
+
+ /* Disable *all* interrupts */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+ /* Clear bits if they're already high */
+ radeon_acknowledge_irqs(dev_priv, &dummy);
+}
+
+int radeon_driver_irq_postinstall(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+
+ atomic_set(&dev_priv->swi_emitted, 0);
+ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+ dev->max_vblank_count = 0x001fffff;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return 0;
+
+ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+ return 0;
+}
+
+void radeon_driver_irq_uninstall(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+ if (!dev_priv)
+ return;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+ /* Disable *all* interrupts */
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+}
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+ return dev_priv->vblank_crtc;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+ DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+ return -EINVAL;
+ }
+ dev_priv->vblank_crtc = (unsigned int)value;
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 0000000..1fe12ab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @DRM_IRQ_ARGS: standard drm irq handler arguments (int irq, void *arg)
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_irq_process(rdev);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt. It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ hotplug_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+ unsigned i;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ /* Disable *all* interrupts */
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ atomic_set(&rdev->irq.ring_int[i], 0);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+ rdev->irq.hpd[i] = false;
+ for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
+ rdev->irq.afmt[i] = false;
+ }
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ /* Clear bits */
+ radeon_irq_process(rdev);
+}
+
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles the work to be done after irqs have been enabled (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
+ dev->max_vblank_count = 0x001fffff;
+ return 0;
+}
+
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+ unsigned i;
+
+ if (rdev == NULL) {
+ return;
+ }
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ /* Disable *all* interrupts */
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ atomic_set(&rdev->irq.ring_int[i], 0);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+ rdev->irq.hpd[i] = false;
+ for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+ rdev->irq.crtc_vblank_int[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
+ rdev->irq.afmt[i] = false;
+ }
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+ /* RV370/RV380 was first asic with MSI support */
+ if (rdev->family < CHIP_RV380)
+ return false;
+
+ /* MSIs don't work on AGP */
+ if (rdev->flags & RADEON_IS_AGP)
+ return false;
+
+ /* force MSI on */
+ if (radeon_msi == 1)
+ return true;
+ else if (radeon_msi == 0)
+ return false;
+
+ /* Quirks */
+ /* HP RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x103c) &&
+ (rdev->pdev->subsystem_device == 0x30c2))
+ return true;
+
+ /* Dell RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x01fc))
+ return true;
+
+ /* Dell RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
+ /* Gateway RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x107b) &&
+ (rdev->pdev->subsystem_device == 0x0185))
+ return true;
+
+ /* try and enable MSIs by default on all RS690s */
+ if (rdev->family == CHIP_RS690)
+ return true;
+
+ /* RV515 seems to have MSI issues where it loses
+ * MSI rearms occasionally. This leads to lockups and freezes.
+ * disable it by default.
+ */
+ if (rdev->family == CHIP_RV515)
+ return false;
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* APUs work fine with MSIs */
+ if (rdev->family >= CHIP_PALM)
+ return true;
+ /* lots of IGPs have problems with MSIs */
+ return false;
+ }
+
+ return true;
+}
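+
+/*
+ * Editor's note (assumed parameter semantics): radeon_msi above is the
+ * driver's "msi" module parameter -- 1 forces MSIs on, 0 forces them off,
+ * and the default (auto) falls through to the quirk list and family
+ * checks, e.g.:
+ *
+ *	modprobe radeon msi=0	# force legacy line-based interrupts
+ */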
+
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+ int r = 0;
+
+ spin_lock_init(&rdev->irq.lock);
+ r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+ if (r) {
+ return r;
+ }
+ /* enable msi */
+ rdev->msi_enabled = 0;
+
+ if (radeon_msi_ok(rdev)) {
+ int ret = pci_enable_msi(rdev->pdev);
+ if (!ret) {
+ rdev->msi_enabled = 1;
+ dev_info(rdev->dev, "radeon: using MSI.\n");
+ }
+ }
+ rdev->irq.installed = true;
+ r = drm_irq_install(rdev->ddev);
+ if (r) {
+ rdev->irq.installed = false;
+ return r;
+ }
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
+ DRM_INFO("radeon: irq initialized.\n");
+ return 0;
+}
+
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+ drm_vblank_cleanup(rdev->ddev);
+ if (rdev->irq.installed) {
+ drm_irq_uninstall(rdev->ddev);
+ rdev->irq.installed = false;
+ if (rdev->msi_enabled)
+ pci_disable_msi(rdev->pdev);
+ flush_work(&rdev->hotplug_work);
+ }
+}
+
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+{
+ unsigned long irqflags;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ }
+}
+
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+{
+ unsigned long irqflags;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ }
+}
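+
+/*
+ * Editor's sketch (illustrative pairing, not part of this patch): fence
+ * waiters bracket their sleep with the get/put helpers above so the ring
+ * interrupt is only unmasked while someone is actually waiting:
+ *
+ *	radeon_irq_kms_sw_irq_get(rdev, ring);
+ *	r = wait_event_interruptible(...);	// woken from the irq handler
+ *	radeon_irq_kms_sw_irq_put(rdev, ring);
+ */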
+
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ }
+}
+
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ }
+}
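+
+/*
+ * Editor's sketch (illustrative, not part of this patch): the pflip
+ * refcounts follow the same get/put pattern -- the page-flip path takes a
+ * reference before arming the flip and drops it once the flip completes
+ * from the vblank handler:
+ *
+ *	radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ *	...program the new base address and wait for the vblank...
+ *	radeon_irq_kms_pflip_irq_put(rdev, radeon_crtc->crtc_id);
+ */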
+
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = true;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
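+
+/*
+ * Editor's sketch (illustrative, not part of this patch): callers build
+ * hpd_mask from the per-connector hpd pin ids, roughly:
+ *
+ *	unsigned enable = 0;
+ *	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ *		struct radeon_connector *rc = to_radeon_connector(connector);
+ *		enable |= 1 << rc->hpd.hpd;
+ *	}
+ *	radeon_irq_kms_enable_hpd(rdev, enable);
+ */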
+
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 0000000..7e292d8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,762 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+#include "radeon_asic.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ * Returns 0 on success.
+ */
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev == NULL)
+ return 0;
+ if (rdev->rmmio == NULL)
+ goto done_free;
+ radeon_acpi_fini(rdev);
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
+
+done_free:
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+ struct radeon_device *rdev;
+ int r, acpi_status;
+
+ rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
+ if (rdev == NULL) {
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)rdev;
+
+ /* update BUS flag */
+ if (drm_pci_device_is_agp(dev)) {
+ flags |= RADEON_IS_AGP;
+ } else if (pci_is_pcie(dev->pdev)) {
+ flags |= RADEON_IS_PCIE;
+ } else {
+ flags |= RADEON_IS_PCI;
+ }
+
+ /* radeon_device_init should report only fatal errors, such
+ * as memory allocation, iomapping or memory manager
+ * initialization failures; it must properly initialize the
+ * GPU MC controller and permit VRAM allocation.
+ */
+ r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ goto out;
+ }
+
+ /* Again, modeset_init should fail only on fatal errors;
+ * otherwise it should provide enough functionality for
+ * shadowfb to run.
+ */
+ r = radeon_modeset_init(rdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+
+ /* Call ACPI methods: these require modeset init,
+ * but failure here is not fatal.
+ */
+ if (!r) {
+ acpi_status = radeon_acpi_init(rdev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev,
+ "Error during ACPI methods call\n");
+ }
+
+out:
+ if (r)
+ radeon_driver_unload_kms(dev);
+ return r;
+}
+
+/**
+ * radeon_set_filp_rights - Set filp right.
+ *
+ * @dev: drm dev pointer
+ * @owner: pointer to the drm file that currently holds the right
+ * @applier: drm file requesting or revoking the right
+ * @value: 1 to request the right, 0 to revoke it; on return, 1 if
+ *         @applier holds the right, 0 otherwise
+ *
+ * Sets the filp rights for the device (all asics).
+ */
+static void radeon_set_filp_rights(struct drm_device *dev,
+ struct drm_file **owner,
+ struct drm_file *applier,
+ uint32_t *value)
+{
+ mutex_lock(&dev->struct_mutex);
+ if (*value == 1) {
+ /* wants rights */
+ if (!*owner)
+ *owner = applier;
+ } else if (*value == 0) {
+ /* revokes rights */
+ if (*owner == applier)
+ *owner = NULL;
+ }
+ *value = *owner == applier ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/*
+ * Userspace get information ioctl
+ */
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @rdev: radeon device pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers. Examples include: pci device id, pipeline parms, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_radeon_info *info = data;
+ struct radeon_mode_info *minfo = &rdev->mode_info;
+ uint32_t *value, value_tmp, *value_ptr, value_size;
+ uint64_t value64;
+ struct drm_crtc *crtc;
+ int i, found;
+
+ value_ptr = (uint32_t *)((unsigned long)info->value);
+ value = &value_tmp;
+ value_size = sizeof(uint32_t);
+
+ switch (info->request) {
+ case RADEON_INFO_DEVICE_ID:
+ *value = dev->pci_device;
+ break;
+ case RADEON_INFO_NUM_GB_PIPES:
+ *value = rdev->num_gb_pipes;
+ break;
+ case RADEON_INFO_NUM_Z_PIPES:
+ *value = rdev->num_z_pipes;
+ break;
+ case RADEON_INFO_ACCEL_WORKING:
+ /* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+ *value = false;
+ else
+ *value = rdev->accel_working;
+ break;
+ case RADEON_INFO_CRTC_FROM_ID:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+ crtc = (struct drm_crtc *)minfo->crtcs[i];
+ if (crtc && crtc->base.id == *value) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ *value = radeon_crtc->crtc_id;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_ACCEL_WORKING2:
+ *value = rdev->accel_working;
+ break;
+ case RADEON_INFO_TILING_CONFIG:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.tile_config;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.tile_config;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.tile_config;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.tile_config;
+ else {
+ DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_WANT_HYPERZ:
+ /* The "value" here is both an input and output parameter.
+ * If the input value is 1, filp requests hyper-z access.
+ * If the input value is 0, filp revokes its hyper-z access.
+ *
+ * When returning, the value is 1 if filp owns hyper-z access,
+ * 0 otherwise. */
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
+ return -EINVAL;
+ }
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
+ break;
+ case RADEON_INFO_WANT_CMASK:
+ /* The same logic as Hyper-Z. */
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
+ return -EINVAL;
+ }
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
+ break;
+ case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+ /* return clock value in KHz */
+ if (rdev->asic->get_xclk)
+ *value = radeon_get_xclk(rdev) * 10;
+ else
+ *value = rdev->clock.spll.reference_freq * 10;
+ break;
+ case RADEON_INFO_NUM_BACKENDS:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.max_backends_per_se *
+ rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.max_backends;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.max_backends;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.max_backends;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_NUM_TILE_PIPES:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.max_tile_pipes;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.max_tile_pipes;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.max_tile_pipes;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.max_tile_pipes;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.max_tile_pipes;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_FUSION_GART_WORKING:
+ *value = 1;
+ break;
+ case RADEON_INFO_BACKEND_MAP:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.backend_map;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.backend_map;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.backend_map;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.backend_map;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.backend_map;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_VA_START:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ *value = RADEON_VA_RESERVED_SIZE;
+ break;
+ case RADEON_INFO_IB_VM_MAX_SIZE:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ *value = RADEON_IB_VM_MAX_SIZE;
+ break;
+ case RADEON_INFO_MAX_PIPES:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.max_cu_per_sh;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.max_pipes_per_simd;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.max_pipes;
+ else if (rdev->family >= CHIP_RV770)
+ *value = rdev->config.rv770.max_pipes;
+ else if (rdev->family >= CHIP_R600)
+ *value = rdev->config.r600.max_pipes;
+ else {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_TIMESTAMP:
+ if (rdev->family < CHIP_R600) {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ value = (uint32_t *)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = radeon_get_gpu_clock_counter(rdev);
+ break;
+ case RADEON_INFO_MAX_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.max_shader_engines;
+ else if (rdev->family >= CHIP_CAYMAN)
+ *value = rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ *value = rdev->config.evergreen.num_ses;
+ else
+ *value = 1;
+ break;
+ case RADEON_INFO_MAX_SH_PER_SE:
+ if (rdev->family >= CHIP_TAHITI)
+ *value = rdev->config.si.max_sh_per_se;
+ else
+ return -EINVAL;
+ break;
+ case RADEON_INFO_FASTFB_WORKING:
+ *value = rdev->fastfb_working;
+ break;
+ case RADEON_INFO_RING_WORKING:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ switch (*value) {
+ case RADEON_CS_RING_GFX:
+ case RADEON_CS_RING_COMPUTE:
+ *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+ break;
+ case RADEON_CS_RING_DMA:
+ *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+ *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+ break;
+ case RADEON_CS_RING_UVD:
+ *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_SI_TILE_MODE_ARRAY:
+ if (rdev->family < CHIP_TAHITI) {
+ DRM_DEBUG_KMS("tile mode array is si only!\n");
+ return -EINVAL;
+ }
+ value = rdev->config.si.tile_mode_array;
+ value_size = sizeof(uint32_t) * 32;
+ break;
+ case RADEON_INFO_SI_CP_DMA_COMPUTE:
+ *value = 1;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+ return -EINVAL;
+ }
+ if (DRM_COPY_TO_USER(value_ptr, (char *)value, value_size)) {
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+}
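+
+/*
+ * Editor's sketch (hypothetical userspace caller, not part of this patch):
+ * info->value carries a user pointer, so a libdrm-style query would look
+ * like:
+ *
+ *	uint32_t val = 0;
+ *	struct drm_radeon_info info = {
+ *		.request = RADEON_INFO_DEVICE_ID,
+ *		.value = (uintptr_t)&val,
+ *	};
+ *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
+ */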
+
+/*
+ * Outdated mess for old drm with Xorg being in charge (void function now).
+ */
+/**
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+ return 0;
+}
+
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+ vga_switcheroo_process_delayed_switch();
+}
+
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ file_priv->driver_priv = NULL;
+
+ /* newer GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN) {
+ struct radeon_fpriv *fpriv;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv)) {
+ return -ENOMEM;
+ }
+
+ radeon_vm_init(rdev, &fpriv->vm);
+
+ /* map the ib pool buffer read-only into the
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
+
+ file_priv->driver_priv = fpriv;
+ }
+ return 0;
+}
+
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down vm on cayman+ (all asics).
+ */
+void radeon_driver_postclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* newer GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (!r) {
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ }
+
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ file_priv->driver_priv = NULL;
+ }
+}
+
+/**
+ * radeon_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).
+ */
+void radeon_driver_preclose_kms(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ if (rdev->hyperz_filp == file_priv)
+ rdev->hyperz_filp = NULL;
+ if (rdev->cmask_filp == file_priv)
+ rdev->cmask_filp = NULL;
+ radeon_uvd_free_handles(rdev, file_priv);
+}
+
+/*
+ * VBlank related functions.
+ */
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ return radeon_get_vblank_counter(rdev, crtc);
+}
+
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+ int r;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.crtc_vblank_int[crtc] = true;
+ r = radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ return r;
+}
+
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return;
+ }
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.crtc_vblank_int[crtc] = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position. (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get associated drm_crtc: */
+ drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags,
+ drmcrtc);
+}
+
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* Not valid in KMS. */
+ return -EINVAL;
+}
+
+#define KMS_INVALID_IOCTL(name) \
+int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+{ \
+ DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
+ return -EINVAL; \
+}
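+
+/*
+ * Editor's note: each KMS_INVALID_IOCTL(name) line below expands to a stub
+ * with the standard ioctl signature, e.g.:
+ *
+ *	int radeon_cp_init_kms(struct drm_device *dev, void *data,
+ *			       struct drm_file *file_priv)
+ *	{
+ *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
+ *		return -EINVAL;
+ *	}
+ */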
+
+/*
+ * All these ioctls are invalid in kms world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ /* KMS */
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+};
+int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 0000000..7cb178a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1077 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
+static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int xres = mode->hdisplay;
+ int yres = mode->vdisplay;
+ bool hscale = true, vscale = true;
+ int hsync_wid;
+ int vsync_wid;
+ int hsync_start;
+ int blank_width;
+ u32 scale, inc, crtc_more_cntl;
+ u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
+ u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
+ u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
+ struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
+
+ fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+ (RADEON_VERT_STRETCH_RESERVED |
+ RADEON_VERT_AUTO_RATIO_INC);
+ fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+ (RADEON_HORZ_FP_LOOP_STRETCH |
+ RADEON_HORZ_AUTO_RATIO_INC);
+
+ crtc_more_cntl = 0;
+ if ((rdev->family == CHIP_RS100) ||
+ (rdev->family == CHIP_RS200)) {
+ /* This is to work around an ASIC bug with RMX: some BIOS
+ versions don't have this register initialized correctly. */
+ crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+ }
+
+ fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+ hsync_start = mode->crtc_hsync_start - 8;
+
+ fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0));
+
+ fp_horz_vert_active = 0;
+
+ if (native_mode->hdisplay == 0 ||
+ native_mode->vdisplay == 0) {
+ hscale = false;
+ vscale = false;
+ } else {
+ if (xres > native_mode->hdisplay)
+ xres = native_mode->hdisplay;
+ if (yres > native_mode->vdisplay)
+ yres = native_mode->vdisplay;
+
+ if (xres == native_mode->hdisplay)
+ hscale = false;
+ if (yres == native_mode->vdisplay)
+ vscale = false;
+ }
+
+ switch (radeon_crtc->rmx_type) {
+ case RMX_FULL:
+ case RMX_ASPECT:
+ if (!hscale)
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ else {
+ inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+ scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+ / native_mode->hdisplay + 1;
+ fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+ RADEON_HORZ_STRETCH_BLEND |
+ RADEON_HORZ_STRETCH_ENABLE |
+ ((native_mode->hdisplay/8-1) << 16));
+ }
+
+ if (!vscale)
+ fp_vert_stretch |= ((yres-1) << 12);
+ else {
+ inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+ scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+ / native_mode->vdisplay + 1;
+ fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+ RADEON_VERT_STRETCH_ENABLE |
+ RADEON_VERT_STRETCH_BLEND |
+ ((native_mode->vdisplay-1) << 12));
+ }
+ break;
+ case RMX_CENTER:
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ fp_vert_stretch |= ((yres-1) << 12);
+
+ crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+ RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+ blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+ if (blank_width > 110)
+ blank_width = 110;
+
+ fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+
+ fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0)));
+
+ fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+ (((native_mode->hdisplay / 8) & 0x1ff) << 16));
+ break;
+ case RMX_OFF:
+ default:
+ fp_horz_stretch |= ((xres/8-1) << 16);
+ fp_vert_stretch |= ((yres-1) << 12);
+ break;
+ }
+
+ WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
+ WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
+ WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
+ WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
+ WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
+ WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
+ WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+ WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+}
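+
+/*
+ * Editor's worked example (illustrative values): stretching a 1024-wide
+ * mode onto a 1280-wide native panel with RMX_FULL, assuming
+ * RADEON_HORZ_STRETCH_RATIO_MAX == 0x1000 and inc == 0, gives
+ *
+ *	scale = 1024 * 4096 / 1280 + 1 = 3277
+ *
+ * i.e. roughly 0.8 of the full ratio, as expected for 1024/1280.
+ */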
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int i = 0;
+
+ /* FIXME: Certain revisions of R300 can't recover here. Not sure of
+ the cause yet, but this workaround will mask the problem for now.
+ Other chips usually will pass at the very first test, so the
+ workaround shouldn't have any effect on them. */
+ for (i = 0;
+ (i < 10000 &&
+ RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+ i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ RADEON_PPLL_ATOMIC_UPDATE_W,
+ ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ int i = 0;
+
+ /* FIXME: Certain revisions of R300 can't recover here. Not sure of
+ the cause yet, but this workaround will mask the problem for now.
+ Other chips usually will pass at the very first test, so the
+ workaround shouldn't have any effect on them. */
+ for (i = 0;
+ (i < 10000 &&
+ RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+ i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+ WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+ RADEON_P2PLL_ATOMIC_UPDATE_W,
+ ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+ uint16_t fb_div)
+{
+ unsigned int vcoFreq;
+
+ if (!ref_div)
+ return 1;
+
+ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+ /*
+ * This is horribly crude: the VCO frequency range is divided into
+ * 3 parts, each part having a fixed PLL gain value.
+ */
+ if (vcoFreq >= 30000)
+ /*
+ * [300..max] MHz : 7
+ */
+ return 7;
+ else if (vcoFreq >= 18000)
+ /*
+ * [180..300) MHz : 4
+ */
+ return 4;
+ else
+ /*
+ * [0..180) MHz : 1
+ */
+ return 1;
+}
+
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc_ext_cntl = 0;
+ uint32_t mask;
+
+ if (radeon_crtc->crtc_id)
+ mask = (RADEON_CRTC2_DISP_DIS |
+ RADEON_CRTC2_VSYNC_DIS |
+ RADEON_CRTC2_HSYNC_DIS |
+ RADEON_CRTC2_DISP_REQ_EN_B);
+ else
+ mask = (RADEON_CRTC_DISPLAY_DIS |
+ RADEON_CRTC_VSYNC_DIS |
+ RADEON_CRTC_HSYNC_DIS);
+
+ /*
+ * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
+ * Therefore it is set in the DAC DPMS function.
+ * This is different for GPUs with a single CRTC but a primary and a
+ * TV DAC: there it controls the single CRTC no matter where it is
+ * routed. Therefore we set it here.
+ */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ if (radeon_crtc->crtc_id)
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
+ else {
+ WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+ RADEON_CRTC_DISP_REQ_EN_B));
+ WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+ }
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (radeon_crtc->crtc_id)
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
+ else {
+ WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+ RADEON_CRTC_DISP_REQ_EN_B));
+ WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
+ }
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
+ break;
+ }
+}
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *target_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t base;
+ uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+ uint32_t crtc_pitch, pitch_pixels;
+ uint32_t tiling_flags;
+ int format;
+ uint32_t gen_cntl_reg, gen_cntl_val;
+ int r;
+
+ DRM_DEBUG_KMS("\n");
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+ target_fb = crtc->fb;
+ }
+
+ switch (target_fb->bits_per_pixel) {
+ case 8:
+ format = 2;
+ break;
+ case 15: /* 555 */
+ format = 3;
+ break;
+ case 16: /* 565 */
+ format = 4;
+ break;
+ case 24: /* RGB */
+ format = 5;
+ break;
+ case 32: /* xRGB */
+ format = 6;
+ break;
+ default:
+ return -EINVAL; /* unsupported depth */
+ }
+
+ /* Pin framebuffer & get tiling information */
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ /* Only 27 bit offset for legacy CRTC */
+ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
+ &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MICRO)
+ DRM_ERROR("trying to scanout microtiled buffer\n");
+
+ /* if scanout was in GTT this really wouldn't work */
+ /* crtc offset is from display base addr not FB location */
+ radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
+
+ base -= radeon_crtc->legacy_display_base_addr;
+
+ crtc_offset_cntl = 0;
+
+ pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
+ ((target_fb->bits_per_pixel * 8) - 1)) /
+ (target_fb->bits_per_pixel * 8));
+ crtc_pitch |= crtc_pitch << 16;
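+ /* CRTC_PITCH takes the pitch in units of 8 pixels, rounded up: e.g.
+ * a 1280 pixel wide 32 bpp scanout gives (1280 * 32 + 255) / 256
+ * == 160. The same value is apparently expected in the high word as
+ * well, hence the duplication above.
+ */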
+
+ crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev))
+ crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+ R300_CRTC_MICRO_TILE_BUFFER_DIS |
+ R300_CRTC_MACRO_TILE_EN);
+ else
+ crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+ } else {
+ if (ASIC_IS_R300(rdev))
+ crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+ R300_CRTC_MICRO_TILE_BUFFER_DIS |
+ R300_CRTC_MACRO_TILE_EN);
+ else
+ crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+ }
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev)) {
+ crtc_tile_x0_y0 = x | (y << 16);
+ base &= ~0x7ff;
+ } else {
+ int byteshift = target_fb->bits_per_pixel >> 4;
+ int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+ crtc_offset_cntl |= (y % 16);
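+ /* Pre-R300 macrotiles appear to span 8 scanlines of 256 bytes
+ * (2 KiB): tile_addr selects the tile, the x/y remainders address
+ * the byte within it, and the remaining y phase goes into the low
+ * bits of crtc_offset_cntl.
+ */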
+ }
+ } else {
+ int offset = y * pitch_pixels + x;
+ switch (target_fb->bits_per_pixel) {
+ case 8:
+ offset *= 1;
+ break;
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ default:
+ return -EINVAL; /* unsupported depth */
+ }
+ base += offset;
+ }
+
+ base &= ~7;
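+ /* the CRTC offset register apparently requires 8 byte alignment,
+ * so any low bits of the computed base are dropped here.
+ */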
+
+ if (radeon_crtc->crtc_id == 1)
+ gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
+ else
+ gen_cntl_reg = RADEON_CRTC_GEN_CNTL;
+
+ gen_cntl_val = RREG32(gen_cntl_reg);
+ gen_cntl_val &= ~(0xf << 8);
+ gen_cntl_val |= (format << 8);
+ gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+ WREG32(gen_cntl_reg, gen_cntl_val);
+
+ crtc_offset = (u32)base;
+
+ WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
+
+ if (ASIC_IS_R300(rdev)) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+ else
+ WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+ }
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+ WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+ if (!atomic && fb && fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(fb);
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_encoder *encoder;
+ int format;
+ int hsync_start;
+ int hsync_wid;
+ int vsync_wid;
+ uint32_t crtc_h_total_disp;
+ uint32_t crtc_h_sync_strt_wid;
+ uint32_t crtc_v_total_disp;
+ uint32_t crtc_v_sync_strt_wid;
+ bool is_tv = false;
+
+ DRM_DEBUG_KMS("\n");
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+ is_tv = true;
+ DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
+ break;
+ }
+ }
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ format = 2;
+ break;
+ case 15: /* 555 */
+ format = 3;
+ break;
+ case 16: /* 565 */
+ format = 4;
+ break;
+ case 24: /* RGB */
+ format = 5;
+ break;
+ case 32: /* xRGB */
+ format = 6;
+ break;
+ default:
+ return false;
+ }
+
+ crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+ | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
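+ /* e.g. a 1024x768 mode with crtc_htotal == 1344 packs
+ * (1344 / 8 - 1) == 167 into the low field and (1024 / 8 - 1) == 127
+ * into the high field, i.e. 0x007f00a7.
+ */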
+
+ hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+ if (!hsync_wid)
+ hsync_wid = 1;
+ hsync_start = mode->crtc_hsync_start - 8;
+
+ crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+ | ((hsync_wid & 0x3f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? RADEON_CRTC_H_SYNC_POL
+ : 0));
+
+ /* This works for double scan mode. */
+ crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+ | ((mode->crtc_vdisplay - 1) << 16));
+
+ vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ if (!vsync_wid)
+ vsync_wid = 1;
+
+ crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+ | ((vsync_wid & 0x1f) << 16)
+ | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? RADEON_CRTC_V_SYNC_POL
+ : 0));
+
+ if (radeon_crtc->crtc_id) {
+ uint32_t crtc2_gen_cntl;
+ uint32_t disp2_merge_cntl;
+
+ /* if the TV DAC is enabled for another crtc, keep it enabled */
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
+ crtc2_gen_cntl |= ((format << 8)
+ | RADEON_CRTC2_VSYNC_DIS
+ | RADEON_CRTC2_HSYNC_DIS
+ | RADEON_CRTC2_DISP_DIS
+ | RADEON_CRTC2_DISP_REQ_EN_B
+ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ? RADEON_CRTC2_DBL_SCAN_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+ ? RADEON_CRTC2_CSYNC_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ ? RADEON_CRTC2_INTERLACE_EN
+ : 0));
+
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
+ disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
+ } else {
+ uint32_t crtc_gen_cntl;
+ uint32_t crtc_ext_cntl;
+ uint32_t disp_merge_cntl;
+
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+ crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
+ | (format << 8)
+ | RADEON_CRTC_DISP_REQ_EN_B
+ | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ? RADEON_CRTC_DBL_SCAN_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+ ? RADEON_CRTC_CSYNC_EN
+ : 0)
+ | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ ? RADEON_CRTC_INTERLACE_EN
+ : 0));
+
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc_gen_cntl |= RADEON_CRTC_EN;
+
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+ RADEON_CRTC_VSYNC_DIS |
+ RADEON_CRTC_HSYNC_DIS |
+ RADEON_CRTC_DISPLAY_DIS);
+
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ }
+
+ if (is_tv)
+ radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
+ &crtc_h_sync_strt_wid, &crtc_v_total_disp,
+ &crtc_v_sync_strt_wid);
+
+ WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+ WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+ WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+ WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+ return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_encoder *encoder;
+ uint32_t feedback_div = 0;
+ uint32_t frac_fb_div = 0;
+ uint32_t reference_div = 0;
+ uint32_t post_divider = 0;
+ uint32_t freq = 0;
+ uint8_t pll_gain;
+ bool use_bios_divs = false;
+ /* PLL registers */
+ uint32_t pll_ref_div = 0;
+ uint32_t pll_fb_post_div = 0;
+ uint32_t htotal_cntl = 0;
+ bool is_tv = false;
+ struct radeon_pll *pll;
+
+ struct {
+ int divider;
+ int bitvalue;
+ } *post_div, post_divs[] = {
+ /* From RAGE 128 VR/RAGE 128 GL Register
+ * Reference Manual (Technical Reference
+ * Manual P/N RRG-G04100-C Rev. 0.04), page
+ * 3-17 (PLL_DIV_[3:0]).
+ */
+ { 1, 0 }, /* VCLK_SRC */
+ { 2, 1 }, /* VCLK_SRC/2 */
+ { 4, 2 }, /* VCLK_SRC/4 */
+ { 8, 3 }, /* VCLK_SRC/8 */
+ { 3, 4 }, /* VCLK_SRC/3 */
+ { 16, 5 }, /* VCLK_SRC/16 */
+ { 6, 6 }, /* VCLK_SRC/6 */
+ { 12, 7 }, /* VCLK_SRC/12 */
+ { 0, 0 }
+ };
+
+ if (radeon_crtc->crtc_id)
+ pll = &rdev->clock.p2pll;
+ else
+ pll = &rdev->clock.p1pll;
+
+ pll->flags = RADEON_PLL_LEGACY;
+
+ if (mode->clock > 200000) /* range limits??? */
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ else
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+ is_tv = true;
+ break;
+ }
+
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+ if (!rdev->is_atom_bios) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
+ }
+ }
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!use_bios_divs) {
+ radeon_compute_pll_legacy(pll, mode->clock,
+ &freq, &feedback_div, &frac_fb_div,
+ &reference_div, &post_divider);
+
+ for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+ if (post_div->divider == post_divider)
+ break;
+ }
+
+ if (!post_div->divider)
+ post_div = &post_divs[0];
+
+ DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
+ (unsigned)freq,
+ feedback_div,
+ reference_div,
+ post_divider);
+
+ pll_ref_div = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+ /* apparently programming this otherwise causes a hang??? */
+ if (info->MacModel == RADEON_MAC_IBOOK)
+ pll_fb_post_div = 0x000600ad;
+ else
+#endif
+ pll_fb_post_div = (feedback_div | (post_div->bitvalue << 16));
+
+ htotal_cntl = mode->htotal & 0x7;
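+ /* HTOTAL_CNTL apparently carries the sub-8-pixel remainder of
+ * htotal that the 8-pixel granular CRTC_H_TOTAL field cannot
+ * express.
+ */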
+
+ }
+
+ pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+ pll_ref_div & 0x3ff,
+ pll_fb_post_div & 0x7ff);
+
+ if (radeon_crtc->crtc_id) {
+ uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+ if (is_tv) {
+ radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
+ &pll_ref_div, &pll_fb_post_div,
+ &pixclks_cntl);
+ }
+
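+ /* P2PLL programming sequence: park the pixel clock on the CPU
+ * clock, hold the PLL in reset while the dividers are written,
+ * latch them with an atomic update, then drop reset and switch
+ * the pixel clock back to the PLL.
+ */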
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+ RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+ WREG32_PLL_P(RADEON_P2PLL_CNTL,
+ RADEON_P2PLL_RESET
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN
+ | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+ ~(RADEON_P2PLL_RESET
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN
+ | RADEON_P2PLL_PVG_MASK));
+
+ WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+ pll_ref_div,
+ ~RADEON_P2PLL_REF_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+ pll_fb_post_div,
+ ~RADEON_P2PLL_FB0_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+ pll_fb_post_div,
+ ~RADEON_P2PLL_POST0_DIV_MASK);
+
+ radeon_pll2_write_update(dev);
+ radeon_pll2_wait_for_read_update_complete(dev);
+
+ WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+ WREG32_PLL_P(RADEON_P2PLL_CNTL,
+ 0,
+ ~(RADEON_P2PLL_RESET
+ | RADEON_P2PLL_SLEEP
+ | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+ DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ (unsigned)pll_ref_div,
+ (unsigned)pll_fb_post_div,
+ (unsigned)htotal_cntl,
+ RREG32_PLL(RADEON_P2PLL_CNTL));
+ DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
+ (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+ (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+ (unsigned)((pll_fb_post_div &
+ RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+ mdelay(50); /* let the clock lock */
+
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+ RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+ ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ } else {
+ uint32_t pixclks_cntl;
+
+ if (is_tv) {
+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
+ &pll_fb_post_div, &pixclks_cntl);
+ }
+
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ /* A temporary workaround for the occasional blanking on certain laptop
+ * panels. This appears to be related to the PLL divider registers
+ * (failing to lock?). It occurs even when all dividers are the same as
+ * their old settings. In that case we really don't need to fiddle with
+ * the PLL registers; skipping the update below avoids the blanking
+ * problem on some panels.
+ */
+ if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+ (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+ (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+ WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+ RADEON_PLL_DIV_SEL,
+ ~(RADEON_PLL_DIV_SEL));
+ r100_pll_errata_after_index(rdev);
+ return;
+ }
+ }
+
+ WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+ RADEON_VCLK_SRC_SEL_CPUCLK,
+ ~(RADEON_VCLK_SRC_SEL_MASK));
+ WREG32_PLL_P(RADEON_PPLL_CNTL,
+ RADEON_PPLL_RESET
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+ | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+ ~(RADEON_PPLL_RESET
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_PVG_MASK));
+
+ WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+ RADEON_PLL_DIV_SEL,
+ ~(RADEON_PLL_DIV_SEL));
+ r100_pll_errata_after_index(rdev);
+
+ if (ASIC_IS_R300(rdev) ||
+ (rdev->family == CHIP_RS300) ||
+ (rdev->family == CHIP_RS400) ||
+ (rdev->family == CHIP_RS480)) {
+ if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+ /* When restoring console mode, use saved PPLL_REF_DIV
+ * setting.
+ */
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ pll_ref_div,
+ 0);
+ } else {
+ /* R300 uses ref_div_acc field as real ref divider */
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+ ~R300_PPLL_REF_DIV_ACC_MASK);
+ }
+ } else
+ WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+ pll_ref_div,
+ ~RADEON_PPLL_REF_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_PPLL_DIV_3,
+ pll_fb_post_div,
+ ~RADEON_PPLL_FB3_DIV_MASK);
+
+ WREG32_PLL_P(RADEON_PPLL_DIV_3,
+ pll_fb_post_div,
+ ~RADEON_PPLL_POST3_DIV_MASK);
+
+ radeon_pll_write_update(dev);
+ radeon_pll_wait_for_read_update_complete(dev);
+
+ WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+ WREG32_PLL_P(RADEON_PPLL_CNTL,
+ 0,
+ ~(RADEON_PPLL_RESET
+ | RADEON_PPLL_SLEEP
+ | RADEON_PPLL_ATOMIC_UPDATE_EN
+ | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+ DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ pll_ref_div,
+ pll_fb_post_div,
+ (unsigned)htotal_cntl,
+ RREG32_PLL(RADEON_PPLL_CNTL));
+ DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
+ pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+ pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+ (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+ mdelay(50); /* let the clock lock */
+
+ WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+ RADEON_VCLK_SRC_SEL_PPLLCLK,
+ ~(RADEON_VCLK_SRC_SEL_MASK));
+
+ if (is_tv)
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ }
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ return false;
+ return true;
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ /* TODO TV */
+ radeon_crtc_set_base(crtc, x, y, old_fb);
+ radeon_set_crtc_timing(crtc, adjusted_mode);
+ radeon_set_pll(crtc, adjusted_mode);
+ radeon_overscan_setup(crtc, adjusted_mode);
+ if (radeon_crtc->crtc_id == 0) {
+ radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
+ } else {
+ if (radeon_crtc->rmx_type != RMX_OFF) {
+ /* FIXME: only the first crtc has RMX; what should
+ * we do here?
+ */
+ DRM_ERROR("Mode needs scaling, but only the first crtc can scale.\n");
+ }
+ }
+ return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * The hardware wedges sometimes if you reconfigure one CRTC
+ * whilst another is running (see fdo bug #24611).
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * Reenable the CRTCs that should be running.
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+ if (crtci->enabled)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ }
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+ .dpms = radeon_crtc_dpms,
+ .mode_fixup = radeon_crtc_mode_fixup,
+ .mode_set = radeon_crtc_mode_set,
+ .mode_set_base = radeon_crtc_set_base,
+ .mode_set_base_atomic = radeon_crtc_set_base_atomic,
+ .prepare = radeon_crtc_prepare,
+ .commit = radeon_crtc_commit,
+ .load_lut = radeon_crtc_load_lut,
+};
+
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc)
+{
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+ drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 0000000..62cd512
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1810 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+#include <linux/backlight.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
+static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ radeon_encoder->active_device = 0;
+}
+
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+ int panel_pwr_delay = 2000;
+ bool is_mac = false;
+ uint8_t backlight_level;
+ DRM_DEBUG_KMS("\n");
+
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ panel_pwr_delay = lvds->panel_pwr_delay;
+ if (lvds->bl_dev)
+ backlight_level = lvds->backlight_level;
+ }
+ }
+
+ /* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
+ * Taken from radeonfb.
+ */
+ if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+ is_mac = true;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+ disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+ WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+ mdelay(1);
+
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+ lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+ RADEON_LVDS_BL_MOD_LEVEL_MASK);
+ lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+ RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+ (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
+ if (is_mac)
+ lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+ mdelay(panel_pwr_delay);
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+ WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+ if (is_mac) {
+ lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+ } else {
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ }
+ mdelay(panel_pwr_delay);
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+ mdelay(panel_pwr_delay);
+ break;
+ }
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("\n");
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->dpms_mode = mode;
+ }
+ }
+
+ radeon_legacy_lvds_update(encoder, mode);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+ DRM_DEBUG_KMS("\n");
+
+ lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+ lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+ if (rdev->is_atom_bios) {
+ /* The LVDS_GEN_CNTL parameters are computed by the LVDSEncoderControl
+ * AtomBIOS table, so that also needs to be called on resume to set the
+ * register up properly.
+ */
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else {
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+ lvds_gen_cntl = lvds->lvds_gen_cntl;
+ lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+ (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+ lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+ (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+ } else
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ }
+ lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+ RADEON_LVDS_BLON |
+ RADEON_LVDS_EN |
+ RADEON_LVDS_RST_FM);
+
+ if (ASIC_IS_R300(rdev))
+ lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev)) {
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+ } else
+ lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+ } else {
+ if (ASIC_IS_R300(rdev))
+ lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+ else
+ lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+ WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+ if (rdev->family == CHIP_RV410)
+ WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+ .dpms = radeon_legacy_lvds_dpms,
+ .mode_fixup = radeon_legacy_mode_fixup,
+ .prepare = radeon_legacy_lvds_prepare,
+ .mode_set = radeon_legacy_lvds_mode_set,
+ .commit = radeon_legacy_lvds_commit,
+ .disable = radeon_legacy_encoder_disable,
+};
+
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 backlight_level;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int dpms_mode = DRM_MODE_DPMS_ON;
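+ /* a zero backlight level apparently means the panel was left off;
+ * in that case program the new level with the panel kept off rather
+ * than reusing the saved dpms mode.
+ */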
+
+ if (radeon_encoder->enc_priv) {
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ if (lvds->backlight_level > 0)
+ dpms_mode = lvds->dpms_mode;
+ else
+ dpms_mode = DRM_MODE_DPMS_OFF;
+ lvds->backlight_level = level;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ if (lvds->backlight_level > 0)
+ dpms_mode = lvds->dpms_mode;
+ else
+ dpms_mode = DRM_MODE_DPMS_OFF;
+ lvds->backlight_level = level;
+ }
+ }
+
+ radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ uint8_t level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+ level = RADEON_MAX_BL_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ if (pdata->negative)
+ level = RADEON_MAX_BL_LEVEL - level;
+
+ return level;
+}
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+ radeon_legacy_set_backlight_level(radeon_encoder,
+ radeon_legacy_lvds_level(bd));
+
+ return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ struct radeon_encoder *radeon_encoder = pdata->encoder;
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint8_t backlight_level;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+ .get_brightness = radeon_legacy_backlight_get_brightness,
+ .update_status = radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct radeon_backlight_privdata *pdata;
+ uint8_t backlight_level;
+ char bl_name[16];
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ if (!pmac_has_backlight_type("ati") &&
+ !pmac_has_backlight_type("mnca"))
+ return;
+#endif
+
+ pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+ if (!pdata) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto error;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = RADEON_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
+ pdata, &radeon_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto error;
+ }
+
+ pdata->encoder = radeon_encoder;
+
+ backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+ RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+ /* First, try to detect backlight level sense based on the assumption
+ * that firmware set it up at full brightness
+ */
+ if (backlight_level == 0)
+ pdata->negative = true;
+ else if (backlight_level == 0xff)
+ pdata->negative = false;
+ else {
+ /* XXX hack... maybe some day we can figure out in what direction
+ * backlight should work on a given panel?
+ */
+ pdata->negative = (rdev->family != CHIP_RV200 &&
+ rdev->family != CHIP_RV250 &&
+ rdev->family != CHIP_RV280 &&
+ rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ pdata->negative = (pdata->negative ||
+ of_machine_is_compatible("PowerBook4,3") ||
+ of_machine_is_compatible("PowerBook6,3") ||
+ of_machine_is_compatible("PowerBook6,5"));
+#endif
+ }
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ lvds->bl_dev = bd;
+ }
+
+ bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ DRM_INFO("radeon legacy LVDS backlight initialized\n");
+
+ return;
+
+error:
+ kfree(pdata);
+ return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct backlight_device *bd = NULL;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+
+ if (rdev->is_atom_bios) {
+ struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ } else {
+ struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+ bd = lvds->bl_dev;
+ lvds->bl_dev = NULL;
+ }
+
+ if (bd) {
+ struct radeon_backlight_privdata *pdata;
+
+ pdata = bl_get_data(bd);
+ backlight_device_unregister(bd);
+ kfree(pdata);
+
+ DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+ }
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/* stub; keep the signature in sync with the real implementation above */
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->enc_priv) {
+ radeon_legacy_backlight_exit(radeon_encoder);
+ kfree(radeon_encoder->enc_priv);
+ }
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+ .destroy = radeon_lvds_enc_destroy,
+};
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+ uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+ DRM_DEBUG_KMS("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+ dac_cntl &= ~RADEON_DAC_PDWN;
+ dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+ dac_cntl |= RADEON_DAC_PDWN;
+ dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+ break;
+ }
+
+ /* handled in radeon_crtc_dpms() */
+ if (!(rdev->flags & RADEON_SINGLE_CRTC))
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ WREG32(RADEON_DAC_CNTL, dac_cntl);
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+ ~(RADEON_DISP_DAC_SOURCE_MASK);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else {
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+ } else {
+ if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+ ~(RADEON_DISP_DAC_SOURCE_MASK);
+ disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else {
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+ }
+
+ dac_cntl = (RADEON_DAC_MASK_ALL |
+ RADEON_DAC_VGA_ADR_EN |
+ /* TODO 6-bits */
+ RADEON_DAC_8BIT_EN);
+
+ WREG32_P(RADEON_DAC_CNTL,
+ dac_cntl,
+ RADEON_DAC_RANGE_CNTL |
+ RADEON_DAC_BLANKING);
+
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+ dac_macro_cntl = p_dac->ps2_pdac_adj;
+ } else
+ dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+ dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+ uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
+ /* Just don't bother on RN50: those chips are often connected to remote
+ * console hardware and load detection frequently fails on them. So to
+ * make everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
+ /* save the regs we need */
+ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+ dac_cntl = RREG32(RADEON_DAC_CNTL);
+ dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+ tmp = vclk_ecp_cntl &
+ ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+ WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
+ tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+ RADEON_DAC_FORCE_DATA_EN;
+
+ if (color)
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+ else
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+ if (ASIC_IS_R300(rdev))
+ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else if (ASIC_IS_RV100(rdev))
+ tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
+ else
+ tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
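+ /* drive a known pattern onto the DAC outputs; the per-family force
+ * data values appear to be calibrated levels for sensing a monitor
+ * load with the comparator read back below.
+ */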
+
+ WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+ tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+ tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+ WREG32(RADEON_DAC_CNTL, tmp);
+
+ tmp = dac_macro_cntl;
+ tmp &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+
+ WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+ mdelay(2);
+
+ if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+ found = connector_status_connected;
+
+ /* restore the regs we used */
+ WREG32(RADEON_DAC_CNTL, dac_cntl);
+ WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+ return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+ .dpms = radeon_legacy_primary_dac_dpms,
+ .mode_fixup = radeon_legacy_mode_fixup,
+ .prepare = radeon_legacy_primary_dac_prepare,
+ .mode_set = radeon_legacy_primary_dac_mode_set,
+ .commit = radeon_legacy_primary_dac_commit,
+ .detect = radeon_legacy_primary_dac_detect,
+ .disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+ DRM_DEBUG_KMS("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ break;
+ }
+
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+ tmp &= 0xfffff;
+ if (rdev->family == CHIP_RV280) {
+ /* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+ tmp ^= (1 << 22);
+ tmds_pll_cntl ^= (1 << 22);
+ }
+
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
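+ /* the BIOS table appears to hold (pixel clock ceiling in 10 kHz,
+ * register value) pairs; pick the first entry whose ceiling is
+ * above the requested clock, with freq == 0 terminating the list.
+ */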
+ for (i = 0; i < 4; i++) {
+ if (tmds->tmds_pll[i].freq == 0)
+ break;
+ if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+ tmp = tmds->tmds_pll[i].value;
+ break;
+ }
+ }
+ }
+
+ if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+ if (tmp & 0xfff00000)
+ tmds_pll_cntl = tmp;
+ else {
+ tmds_pll_cntl &= 0xfff00000;
+ tmds_pll_cntl |= tmp;
+ }
+ } else
+ tmds_pll_cntl = tmp;
+
+ tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+ ~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+ if (rdev->family == CHIP_R200 ||
+ rdev->family == CHIP_R100 ||
+ ASIC_IS_R300(rdev))
+ tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+ else /* RV chips got this bit reversed */
+ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+ fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+ (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+ RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+ fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+ RADEON_FP_DFP_SYNC_SEL |
+ RADEON_FP_CRT_SYNC_SEL |
+ RADEON_FP_CRTC_LOCK_8DOT |
+ RADEON_FP_USE_SHADOW_EN |
+ RADEON_FP_CRTC_USE_SHADOW_VEND |
+ RADEON_FP_CRT_SYNC_ALT);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
+ else
+ fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+ else
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+ } else
+ fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+ } else {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+ } else
+ fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+ WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+ .dpms = radeon_legacy_tmds_int_dpms,
+ .mode_fixup = radeon_legacy_mode_fixup,
+ .prepare = radeon_legacy_tmds_int_prepare,
+ .mode_set = radeon_legacy_tmds_int_mode_set,
+ .commit = radeon_legacy_tmds_int_commit,
+ .disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ DRM_DEBUG_KMS("\n");
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ break;
+ }
+
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, false);
+ else
+ radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t fp2_gen_cntl;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (rdev->is_atom_bios) {
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ } else {
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
+ else
+ fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */
+
+ fp2_gen_cntl &= ~(RADEON_FP2_ON |
+ RADEON_FP2_DVO_EN |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+
+ /* XXX: these are oem specific */
+ if (ASIC_IS_R300(rdev)) {
+ if ((dev->pdev->device == 0x4850) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+ else
+ fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+ /*if (mode->clock > 165000)
+ fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+ }
+ if (!radeon_combios_external_tmds_setup(encoder))
+ radeon_external_tmds_setup(encoder);
+ }
+
+ if (radeon_crtc->crtc_id == 0) {
+ if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+ else
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+ } else
+ fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+ } else {
+ if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+ fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+ } else
+ fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+ .dpms = radeon_legacy_tmds_ext_dpms,
+ .mode_fixup = radeon_legacy_mode_fixup,
+ .prepare = radeon_legacy_tmds_ext_prepare,
+ .mode_set = radeon_legacy_tmds_ext_mode_set,
+ .commit = radeon_legacy_tmds_ext_commit,
+ .disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+ .destroy = radeon_ext_tmds_enc_destroy,
+};
+
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+ uint32_t tv_master_cntl = 0;
+ bool is_tv;
+ DRM_DEBUG_KMS("\n");
+
+ is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+ if (rdev->family == CHIP_R200)
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ else {
+ if (is_tv)
+ tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+ else
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ } else {
+ if (is_tv)
+ tv_master_cntl |= RADEON_TV_ON;
+ else
+ crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410)
+ tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ else
+ tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ }
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (rdev->family == CHIP_R200)
+ fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+ else {
+ if (is_tv)
+ tv_master_cntl &= ~RADEON_TV_ON;
+ else
+ crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410)
+ tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ else
+ tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGSLEEP);
+ }
+ break;
+ }
+
+ if (rdev->family == CHIP_R200) {
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ } else {
+ if (is_tv)
+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+ /* handled in radeon_crtc_dpms() */
+ else if (!(rdev->flags & RADEON_SINGLE_CRTC))
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ }
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ else
+ radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+
+ radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (rdev->is_atom_bios)
+ radeon_atom_output_lock(encoder, true);
+ else
+ radeon_combios_output_lock(encoder, true);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+ uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
+ bool is_tv = false;
+
+ DRM_DEBUG_KMS("\n");
+
+ is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+ if (rdev->family != CHIP_R200) {
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ if (rdev->family == CHIP_R420 ||
+ rdev->family == CHIP_R423 ||
+ rdev->family == CHIP_RV410) {
+ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_BGADJ_MASK |
+ R420_TV_DAC_DACADJ_MASK |
+ R420_TV_DAC_RDACPD |
+ R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
+ R420_TV_DAC_TVENABLE);
+ } else {
+ tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD);
+ }
+
+ tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+ if (is_tv) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
+ else
+ tv_dac_cntl |= tv_dac->pal_tvdac_adj;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J)
+ tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+ else
+ tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+ } else
+ tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
+ tv_dac->ps2_tvdac_adj);
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ }
+
+ if (ASIC_IS_R300(rdev)) {
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ } else if (rdev->family != CHIP_R200)
+ disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+	else
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+ if (rdev->family >= CHIP_R200)
+ disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
+ if (is_tv) {
+ uint32_t dac_cntl;
+
+ dac_cntl = RREG32(RADEON_DAC_CNTL);
+ dac_cntl &= ~RADEON_DAC_TVO_EN;
+ WREG32(RADEON_DAC_CNTL, dac_cntl);
+
+ if (ASIC_IS_R300(rdev))
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1;
+
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL;
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC |
+ RADEON_DISP_TV_SOURCE_CRTC);
+ }
+ if (rdev->family >= CHIP_R200) {
+ disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2;
+ } else {
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+ }
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC;
+ }
+ if (rdev->family >= CHIP_R200) {
+ disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2;
+ } else {
+ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+ }
+ }
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ } else {
+ dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+ } else if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+ } else
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ } else if (rdev->family == CHIP_R200) {
+ fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+ RADEON_FP2_DVO_RATE_SEL_SDR);
+ fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+ } else
+ disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+ }
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ } else if (rdev->family != CHIP_R200)
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+	else
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+ if (rdev->family >= CHIP_R200)
+ WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
+
+ if (is_tv)
+ radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
+
+ if (rdev->is_atom_bios)
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ else
+ radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+ uint32_t disp_output_cntl, gpiopad_a, tmp;
+ bool found = false;
+
+ /* save regs needed */
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+
+ WREG32_P(RADEON_GPIOPAD_A, 0, ~1);
+
+ WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);
+
+ WREG32(RADEON_CRTC2_GEN_CNTL,
+ RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);
+
+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+
+ WREG32(RADEON_DAC_EXT_CNTL,
+ RADEON_DAC2_FORCE_BLANK_OFF_EN |
+ RADEON_DAC2_FORCE_DATA_EN |
+ RADEON_DAC_FORCE_DATA_SEL_RGB |
+ (0xec << RADEON_DAC_FORCE_DATA_SHIFT));
+
+ WREG32(RADEON_TV_DAC_CNTL,
+ RADEON_TV_DAC_STD_NTSC |
+ (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+ (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+ RREG32(RADEON_TV_DAC_CNTL);
+ mdelay(4);
+
+ WREG32(RADEON_TV_DAC_CNTL,
+ RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_MONITOR_DETECT_EN |
+ RADEON_TV_DAC_STD_NTSC |
+ (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+ (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+ RREG32(RADEON_TV_DAC_CNTL);
+ mdelay(6);
+
+ tmp = RREG32(RADEON_TV_DAC_CNTL);
+ if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
+ found = true;
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
+ } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+ found = true;
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
+ }
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ return found;
+}
+
+static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tv_dac_cntl, dac_cntl2;
+ uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
+ bool found = false;
+
+ if (ASIC_IS_R300(rdev))
+ return r300_legacy_tv_detect(encoder, connector);
+
+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+ tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ config_cntl = RREG32(RADEON_CONFIG_CNTL);
+ tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);
+
+ tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
+ WREG32(RADEON_DAC_CNTL2, tmp);
+
+ tmp = tv_master_cntl | RADEON_TV_ON;
+ tmp &= ~(RADEON_TV_ASYNC_RST |
+ RADEON_RESTART_PHASE_FIX |
+ RADEON_CRT_FIFO_CE_EN |
+ RADEON_TV_FIFO_CE_EN |
+ RADEON_RE_SYNC_NOW_SEL_MASK);
+ tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
+ WREG32(RADEON_TV_MASTER_CNTL, tmp);
+
+ tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
+ RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
+ (8 << RADEON_TV_DAC_BGADJ_SHIFT);
+
+ if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
+ tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
+ else
+ tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
+ WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+ tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
+ RADEON_RED_MX_FORCE_DAC_DATA |
+ RADEON_GRN_MX_FORCE_DAC_DATA |
+ RADEON_BLU_MX_FORCE_DAC_DATA |
+ (0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);
+
+ mdelay(3);
+ tmp = RREG32(RADEON_TV_DAC_CNTL);
+ if (tmp & RADEON_TV_DAC_GDACDET) {
+ found = true;
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
+ } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+ found = true;
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
+ }
+
+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+ return found;
+}
+
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+ uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+ uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+ uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+ uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+ bool found = false;
+ int i;
+
+ /* save the regs we need */
+ gpio_monid = RREG32(RADEON_GPIO_MONID);
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+ disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+ disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+ disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+ disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+ disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+ crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+ crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+ crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+ crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+ tmp = RREG32(RADEON_GPIO_MONID);
+ tmp &= ~RADEON_GPIO_A_0;
+ WREG32(RADEON_GPIO_MONID, tmp);
+
+ WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+ RADEON_FP2_PANEL_FORMAT |
+ R200_FP2_SOURCE_SEL_TRANS_UNIT |
+ RADEON_FP2_DVO_EN |
+ R200_FP2_DVO_RATE_SEL_SDR));
+
+ WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+ RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+ WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+ RADEON_CRTC2_DISP_REQ_EN_B));
+
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
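+	/*
+	 * With the DVO output forced to a fixed pattern, poll the MONID GPIO
+	 * for up to ~200 ms; the external DAC presumably pulls the sense line
+	 * high when a load is attached.
+	 */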
+ for (i = 0; i < 200; i++) {
+ tmp = RREG32(RADEON_GPIO_MONID);
+ if (tmp & RADEON_GPIO_Y_0)
+ found = true;
+
+ if (found)
+ break;
+
+ if (!drm_can_sleep())
+ mdelay(1);
+ else
+ msleep(1);
+ }
+
+ /* restore the regs we used */
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+ return found;
+}
+
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+ uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+ uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
+ enum drm_connector_status found = connector_status_disconnected;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ bool color = true;
+ struct drm_crtc *crtc;
+
+	/* if crtc2 is enabled but driven by another encoder, don't disturb it */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+ if (encoder->crtc != crtc) {
+ return connector_status_disconnected;
+ }
+ }
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
+ connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
+ connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
+ bool tv_detect;
+
+ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
+ return connector_status_disconnected;
+
+ tv_detect = radeon_legacy_tv_detect(encoder, connector);
+ if (tv_detect && tv_dac)
+ found = connector_status_connected;
+ return found;
+ }
+
+	/* don't probe if the encoder is in use for something other than CRT */
+ if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) {
+ DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device);
+ return connector_status_disconnected;
+ }
+
+	/* the R200 uses an external DAC as its secondary DAC */
+ if (rdev->family == CHIP_R200) {
+ if (radeon_legacy_ext_dac_detect(encoder, connector))
+ found = connector_status_connected;
+ return found;
+ }
+
+ /* save the regs we need */
+ pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ } else {
+ disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ }
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ }
+ tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+ dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+	tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb |
+			       RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+ WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+ } else {
+ tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+ tmp |= RADEON_CRTC2_CRT2_ON |
+ (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ } else {
+ tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+ WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ }
+ }
+
+ tmp = RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_MONITOR_DETECT_EN |
+ RADEON_TV_DAC_STD_PS2;
+
+ WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+ tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+ RADEON_DAC2_FORCE_DATA_EN;
+
+ if (color)
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+ else
+ tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+ if (ASIC_IS_R300(rdev))
+ tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else
+ tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+ WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+ tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+ WREG32(RADEON_DAC_CNTL2, tmp);
+
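+	/*
+	 * The TV DAC now drives a fixed level with the on-chip comparator
+	 * enabled; after settling, the CMP_OUT bit indicates whether a
+	 * terminated load is present on the connector.
+	 */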
+ mdelay(10);
+
+ if (ASIC_IS_R300(rdev)) {
+ if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+ found = connector_status_connected;
+ } else {
+ if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+ found = connector_status_connected;
+ }
+
+ /* restore regs we used */
+ WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+ WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ } else {
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ if (ASIC_IS_R300(rdev)) {
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ } else {
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ }
+ }
+
+ WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
+ return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+ .dpms = radeon_legacy_tv_dac_dpms,
+ .mode_fixup = radeon_legacy_mode_fixup,
+ .prepare = radeon_legacy_tv_dac_prepare,
+ .mode_set = radeon_legacy_tv_dac_mode_set,
+ .commit = radeon_legacy_tv_dac_commit,
+ .detect = radeon_legacy_tv_dac_detect,
+ .disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+ .destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_int_tmds *tmds = NULL;
+ bool ret;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ if (rdev->is_atom_bios)
+ ret = radeon_atombios_get_tmds_info(encoder, tmds);
+ else
+ ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+ radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_ext_tmds *tmds = NULL;
+ bool ret;
+
+ if (rdev->is_atom_bios)
+ return NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+ radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->encoder_enum == encoder_enum) {
+ radeon_encoder->devices |= supported_device;
+ return;
+ }
+
+
+ /* add a new one */
+ radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+ if (!radeon_encoder)
+ return;
+
+ encoder = &radeon_encoder->base;
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ encoder->possible_crtcs = 0x1;
+ else
+ encoder->possible_crtcs = 0x3;
+
+ radeon_encoder->enc_priv = NULL;
+
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ radeon_encoder->devices = supported_device;
+ radeon_encoder->rmx_type = RMX_OFF;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ encoder->possible_crtcs = 0x1;
+ drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+ radeon_encoder->rmx_type = RMX_FULL;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+ radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+ if (rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+ else
+ radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+ if (!rdev->is_atom_bios)
+ radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
new file mode 100644
index 0000000..49750d0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -0,0 +1,923 @@
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+
+/*
+ * Integrated TV out support based on the GATOS code by
+ * Federico Ulivi <fulivi@lycos.com>
+ */
+
+
+/*
+ * Limits of h/v positions (hPos & vPos)
+ */
+#define MAX_H_POSITION 5 /* Range: [-5..5], negative is on the left, 0 is default, positive is on the right */
+#define MAX_V_POSITION 5 /* Range: [-5..5], negative is up, 0 is default, positive is down */
+
+/*
+ * Unit for hPos (in TV clock periods)
+ */
+#define H_POS_UNIT 10
+
+/*
+ * Indexes in h. code timing table for horizontal line position adjustment
+ */
+#define H_TABLE_POS1 6
+#define H_TABLE_POS2 8
+
+/*
+ * Limits of hor. size (hSize)
+ */
+#define MAX_H_SIZE 5 /* Range: [-5..5], negative is smaller, positive is larger */
+
+/* tv standard constants */
+#define NTSC_TV_CLOCK_T 233
+#define NTSC_TV_VFTOTAL 1
+#define NTSC_TV_LINES_PER_FRAME 525
+#define NTSC_TV_ZERO_H_SIZE 479166
+#define NTSC_TV_H_SIZE_UNIT 9478
+
+#define PAL_TV_CLOCK_T 188
+#define PAL_TV_VFTOTAL 3
+#define PAL_TV_LINES_PER_FRAME 625
+#define PAL_TV_ZERO_H_SIZE 473200
+#define PAL_TV_H_SIZE_UNIT 9360
+
+/* tv pll settings for 27 MHz ref clk */
+#define NTSC_TV_PLL_M_27 22
+#define NTSC_TV_PLL_N_27 175
+#define NTSC_TV_PLL_P_27 5
+
+#define PAL_TV_PLL_M_27 113
+#define PAL_TV_PLL_N_27 668
+#define PAL_TV_PLL_P_27 3
+
+/* tv pll settings for 14 MHz ref clk */
+#define NTSC_TV_PLL_M_14 33
+#define NTSC_TV_PLL_N_14 693
+#define NTSC_TV_PLL_P_14 7
+
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
+#define VERT_LEAD_IN_LINES 2
+#define FRAC_BITS 0xe
+#define FRAC_MASK 0x3fff
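+
+/*
+ * FRAC_BITS/FRAC_MASK describe a fixed-point format with 14 fractional
+ * bits: 1.0 is represented as 1 << 14 = 0x4000, and 0x3fff masks the
+ * fractional part (a clarifying note, not from the original code).
+ */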
+
+struct radeon_tv_mode_constants {
+ uint16_t hor_resolution;
+ uint16_t ver_resolution;
+ enum radeon_tv_std standard;
+ uint16_t hor_total;
+ uint16_t ver_total;
+ uint16_t hor_start;
+ uint16_t hor_syncstart;
+ uint16_t ver_syncstart;
+ unsigned def_restart;
+ uint16_t crtcPLL_N;
+ uint8_t crtcPLL_M;
+ uint8_t crtcPLL_post_div;
+ unsigned pix_to_tv;
+};
+
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
+ 0x0007,
+ 0x003f,
+ 0x0263,
+ 0x0a24,
+ 0x2a6b,
+ 0x0a36,
+ 0x126d, /* H_TABLE_POS1 */
+ 0x1bfe,
+ 0x1a8f, /* H_TABLE_POS2 */
+ 0x1ec7,
+ 0x3863,
+ 0x1bfe,
+ 0x1bfe,
+ 0x1a2a,
+ 0x1e95,
+ 0x0e31,
+ 0x201b,
+ 0
+};
+
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
+ 0x2001,
+ 0x200d,
+ 0x1006,
+ 0x0c06,
+ 0x1006,
+ 0x1818,
+ 0x21e3,
+ 0x1006,
+ 0x0c06,
+ 0x1006,
+ 0x1817,
+ 0x21d4,
+ 0x0002,
+ 0
+};
+
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
+ 0x0007,
+ 0x0058,
+ 0x027c,
+ 0x0a31,
+ 0x2a77,
+ 0x0a95,
+ 0x124f, /* H_TABLE_POS1 */
+ 0x1bfe,
+ 0x1b22, /* H_TABLE_POS2 */
+ 0x1ef9,
+ 0x387c,
+ 0x1bfe,
+ 0x1bfe,
+ 0x1b31,
+ 0x1eb5,
+ 0x0e43,
+ 0x201b,
+ 0
+};
+
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
+ 0x2001,
+ 0x200c,
+ 0x1005,
+ 0x0c05,
+ 0x1005,
+ 0x1401,
+ 0x1821,
+ 0x2240,
+ 0x1005,
+ 0x0c05,
+ 0x1005,
+ 0x1401,
+ 0x1822,
+ 0x2230,
+ 0x0002,
+ 0
+};
+
+/**********************************************************************
+ *
+ * available_tv_modes
+ *
+ * Table of all allowed modes for TV output
+ *
+ **********************************************************************/
+static const struct radeon_tv_mode_constants available_tv_modes[] = {
+	{ /* NTSC timing for 27 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_NTSC, /* standard */
+ 990, /* horTotal */
+ 740, /* verTotal */
+ 813, /* horStart */
+ 824, /* horSyncStart */
+ 632, /* verSyncStart */
+ 625592, /* defRestart */
+ 592, /* crtcPLL_N */
+ 91, /* crtcPLL_M */
+ 4, /* crtcPLL_postDiv */
+ 1022, /* pixToTV */
+ },
+	{ /* PAL timing for 27 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1144, /* horTotal */
+ 706, /* verTotal */
+ 812, /* horStart */
+ 824, /* horSyncStart */
+ 669, /* verSyncStart */
+ 696700, /* defRestart */
+ 1382, /* crtcPLL_N */
+ 231, /* crtcPLL_M */
+ 4, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
+	{ /* NTSC timing for 14 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_NTSC, /* standard */
+ 1018, /* horTotal */
+ 727, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 630627, /* defRestart */
+ 347, /* crtcPLL_N */
+ 14, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 1022, /* pixToTV */
+ },
+	{ /* PAL timing for 14 MHz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1131, /* horTotal */
+ 742, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 708369, /* defRestart */
+ 211, /* crtcPLL_N */
+ 9, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
+};
+
+#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
+
+static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(struct radeon_encoder *radeon_encoder,
+ uint16_t *pll_ref_freq)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ const struct radeon_tv_mode_constants *const_ptr;
+ struct radeon_pll *pll;
+
+ radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+ if (radeon_crtc->crtc_id == 1)
+ pll = &rdev->clock.p2pll;
+ else
+ pll = &rdev->clock.p1pll;
+
+ if (pll_ref_freq)
+ *pll_ref_freq = pll->reference_freq;
+
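+	/*
+	 * reference_freq is kept in units of 10 kHz, so 2700 selects the
+	 * 27 MHz entries of available_tv_modes (an assumption based on the
+	 * radeon clock bookkeeping; the 14 MHz entries cover the rest).
+	 */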
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M) {
+ if (pll->reference_freq == 2700)
+ const_ptr = &available_tv_modes[0];
+ else
+ const_ptr = &available_tv_modes[2];
+ } else {
+ if (pll->reference_freq == 2700)
+ const_ptr = &available_tv_modes[1];
+ else
+ const_ptr = &available_tv_modes[3];
+ }
+ return const_ptr;
+}
+
+static long YCOEF_value[5] = { 2, 2, 0, 4, 0 };
+static long YCOEF_EN_value[5] = { 1, 1, 0, 1, 0 };
+static long SLOPE_value[5] = { 1, 2, 2, 4, 8 };
+static long SLOPE_limit[5] = { 6, 5, 4, 3, 2 };
+
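+/*
+ * Busy-wait for the TV PLL to settle: the PLL test output is routed onto
+ * the debug mux and an 8-bit test counter is repeatedly cleared and read
+ * back; reaching cnt_threshold within n_wait_loops reads is treated as
+ * "locked" (an interpretation of this magic sequence, not documented
+ * behaviour).
+ */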
+static void radeon_wait_pll_lock(struct drm_encoder *encoder, unsigned n_tests,
+ unsigned n_wait_loops, unsigned cnt_threshold)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t save_pll_test;
+ unsigned int i, j;
+
+ WREG32(RADEON_TEST_DEBUG_MUX, (RREG32(RADEON_TEST_DEBUG_MUX) & 0xffff60ff) | 0x100);
+ save_pll_test = RREG32_PLL(RADEON_PLL_TEST_CNTL);
+ WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test & ~RADEON_PLL_MASK_READ_B);
+
+ WREG8(RADEON_CLOCK_CNTL_INDEX, RADEON_PLL_TEST_CNTL);
+ for (i = 0; i < n_tests; i++) {
+ WREG8(RADEON_CLOCK_CNTL_DATA + 3, 0);
+ for (j = 0; j < n_wait_loops; j++)
+ if (RREG8(RADEON_CLOCK_CNTL_DATA + 3) >= cnt_threshold)
+ break;
+ }
+ WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test);
+ WREG32(RADEON_TEST_DEBUG_MUX, RREG32(RADEON_TEST_DEBUG_MUX) & 0xffffe0ff);
+}
+
+
+static void radeon_legacy_tv_write_fifo(struct radeon_encoder *radeon_encoder,
+ uint16_t addr, uint32_t value)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+ int i = 0;
+
+ WREG32(RADEON_TV_HOST_WRITE_DATA, value);
+
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_WT);
+
+ do {
+ tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+ if ((tmp & RADEON_HOST_FIFO_WT_ACK) == 0)
+ break;
+ i++;
+ } while (i < 10000);
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+}
+
+#if 0 /* included for completeness */
+static uint32_t radeon_legacy_tv_read_fifo(struct radeon_encoder *radeon_encoder, uint16_t addr)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+ int i = 0;
+
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_RD);
+
+ do {
+ tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+ if ((tmp & RADEON_HOST_FIFO_RD_ACK) == 0)
+ break;
+ i++;
+ } while (i < 10000);
+ WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+ return RREG32(RADEON_TV_HOST_READ_DATA);
+}
+#endif
+
+static uint16_t radeon_get_htiming_tables_addr(uint32_t tv_uv_adr)
+{
+ uint16_t h_table;
+
+ switch ((tv_uv_adr & RADEON_HCODE_TABLE_SEL_MASK) >> RADEON_HCODE_TABLE_SEL_SHIFT) {
+ case 0:
+ h_table = RADEON_TV_MAX_FIFO_ADDR_INTERNAL;
+ break;
+ case 1:
+ h_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2;
+ break;
+ case 2:
+ h_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2;
+ break;
+ default:
+ h_table = 0;
+ break;
+ }
+ return h_table;
+}
+
+static uint16_t radeon_get_vtiming_tables_addr(uint32_t tv_uv_adr)
+{
+ uint16_t v_table;
+
+ switch ((tv_uv_adr & RADEON_VCODE_TABLE_SEL_MASK) >> RADEON_VCODE_TABLE_SEL_SHIFT) {
+ case 0:
+ v_table = ((tv_uv_adr & RADEON_MAX_UV_ADR_MASK) >> RADEON_MAX_UV_ADR_SHIFT) * 2 + 1;
+ break;
+ case 1:
+ v_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2 + 1;
+ break;
+ case 2:
+ v_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2 + 1;
+ break;
+ default:
+ v_table = 0;
+ break;
+ }
+ return v_table;
+}
+
+static void radeon_restore_tv_timing_tables(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ uint16_t h_table, v_table;
+ uint32_t tmp;
+ int i;
+
+ WREG32(RADEON_TV_UV_ADR, tv_dac->tv.tv_uv_adr);
+ h_table = radeon_get_htiming_tables_addr(tv_dac->tv.tv_uv_adr);
+ v_table = radeon_get_vtiming_tables_addr(tv_dac->tv.tv_uv_adr);
+
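+	/*
+	 * Each FIFO word packs two 14-bit timing codes: the horizontal table
+	 * is written downward from h_table, the vertical table upward from
+	 * v_table, and a zero entry terminates either table.
+	 */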
+ for (i = 0; i < MAX_H_CODE_TIMING_LEN; i += 2, h_table--) {
+ tmp = ((uint32_t)tv_dac->tv.h_code_timing[i] << 14) | ((uint32_t)tv_dac->tv.h_code_timing[i+1]);
+ radeon_legacy_tv_write_fifo(radeon_encoder, h_table, tmp);
+ if (tv_dac->tv.h_code_timing[i] == 0 || tv_dac->tv.h_code_timing[i + 1] == 0)
+ break;
+ }
+ for (i = 0; i < MAX_V_CODE_TIMING_LEN; i += 2, v_table++) {
+ tmp = ((uint32_t)tv_dac->tv.v_code_timing[i+1] << 14) | ((uint32_t)tv_dac->tv.v_code_timing[i]);
+ radeon_legacy_tv_write_fifo(radeon_encoder, v_table, tmp);
+ if (tv_dac->tv.v_code_timing[i] == 0 || tv_dac->tv.v_code_timing[i + 1] == 0)
+ break;
+ }
+}
+
+static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encoder)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ WREG32(RADEON_TV_FRESTART, tv_dac->tv.frestart);
+ WREG32(RADEON_TV_HRESTART, tv_dac->tv.hrestart);
+ WREG32(RADEON_TV_VRESTART, tv_dac->tv.vrestart);
+}
+
+static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc;
+ int restart;
+ unsigned int h_total, v_total, f_total;
+ int v_offset, h_offset;
+ u16 p1, p2, h_inc;
+ bool h_changed;
+ const struct radeon_tv_mode_constants *const_ptr;
+ struct radeon_pll *pll;
+
+ radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+ if (radeon_crtc->crtc_id == 1)
+ pll = &rdev->clock.p2pll;
+ else
+ pll = &rdev->clock.p1pll;
+
+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+ if (!const_ptr)
+ return false;
+
+ h_total = const_ptr->hor_total;
+ v_total = const_ptr->ver_total;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ f_total = NTSC_TV_VFTOTAL + 1;
+ else
+ f_total = PAL_TV_VFTOTAL + 1;
+
+	/* adjust positions 1 & 2 in the horizontal code timing table */
+ h_offset = tv_dac->h_pos * H_POS_UNIT;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M) {
+ h_offset -= 50;
+ p1 = hor_timing_NTSC[H_TABLE_POS1];
+ p2 = hor_timing_NTSC[H_TABLE_POS2];
+ } else {
+ p1 = hor_timing_PAL[H_TABLE_POS1];
+ p2 = hor_timing_PAL[H_TABLE_POS2];
+ }
+
+ p1 = (u16)((int)p1 + h_offset);
+ p2 = (u16)((int)p2 - h_offset);
+
+ h_changed = (p1 != tv_dac->tv.h_code_timing[H_TABLE_POS1] ||
+ p2 != tv_dac->tv.h_code_timing[H_TABLE_POS2]);
+
+ tv_dac->tv.h_code_timing[H_TABLE_POS1] = p1;
+ tv_dac->tv.h_code_timing[H_TABLE_POS2] = p2;
+
+ /* Convert hOffset from n. of TV clock periods to n. of CRTC clock periods (CRTC pixels) */
+ h_offset = (h_offset * (int)(const_ptr->pix_to_tv)) / 1000;
+
+ /* adjust restart */
+ restart = const_ptr->def_restart;
+
+	/* convert v_pos from TV lines to a number of CRTC pixels */
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(NTSC_TV_LINES_PER_FRAME);
+ else
+ v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(PAL_TV_LINES_PER_FRAME);
+
+ restart -= v_offset + h_offset;
+
+ DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+ const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
+
+ tv_dac->tv.hrestart = restart % h_total;
+ restart /= h_total;
+ tv_dac->tv.vrestart = restart % v_total;
+ restart /= v_total;
+ tv_dac->tv.frestart = restart % f_total;
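+	/*
+	 * restart is a mixed-radix counter with digits (field, line, pixel).
+	 * E.g. ignoring the position offsets, the NTSC-27MHz def_restart of
+	 * 625592 with h_total = 990 and v_total = 740 splits into
+	 * hrestart = 625592 % 990 = 902, vrestart = (625592 / 990) % 740 = 631
+	 * and frestart = 0 (a worked example, not computed by the code).
+	 */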
+
+ DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
+ (unsigned)tv_dac->tv.frestart,
+ (unsigned)tv_dac->tv.vrestart,
+ (unsigned)tv_dac->tv.hrestart);
+
+ /* compute h_inc from hsize */
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * NTSC_TV_CLOCK_T) /
+ (tv_dac->h_size * (int)(NTSC_TV_H_SIZE_UNIT) + (int)(NTSC_TV_ZERO_H_SIZE)));
+ else
+ h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * PAL_TV_CLOCK_T) /
+ (tv_dac->h_size * (int)(PAL_TV_H_SIZE_UNIT) + (int)(PAL_TV_ZERO_H_SIZE)));
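+	/*
+	 * h_inc is apparently a fixed-point step with 12 fractional bits
+	 * (inferred from the 4096 scale factor); e.g. for NTSC at the default
+	 * h_size of 0 this gives 800 * 4096 * 233 / 479166 = 1593, i.e. a
+	 * step of about 0.389 (a worked example, not from the original code).
+	 */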
+
+ tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
+ ((u32)h_inc << RADEON_H_INC_SHIFT);
+
+ DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+
+ return h_changed;
+}
+
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+ const struct radeon_tv_mode_constants *const_ptr;
+ struct radeon_crtc *radeon_crtc;
+ int i;
+ uint16_t pll_ref_freq;
+ uint32_t vert_space, flicker_removal, tmp;
+ uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
+ uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
+ uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
+ uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+ uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
+ uint32_t m, n, p;
+ const uint16_t *hor_timing;
+ const uint16_t *vert_timing;
+
+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, &pll_ref_freq);
+ if (!const_ptr)
+ return;
+
+ radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+ tv_master_cntl = (RADEON_VIN_ASYNC_RST |
+ RADEON_CRT_FIFO_CE_EN |
+ RADEON_TV_FIFO_CE_EN |
+ RADEON_TV_ON);
+
+ if (!ASIC_IS_R300(rdev))
+ tv_master_cntl |= RADEON_TVCLK_ALWAYS_ONb;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J)
+ tv_master_cntl |= RADEON_RESTART_PHASE_FIX;
+
+ tv_modulator_cntl1 = (RADEON_SLEW_RATE_LIMIT |
+ RADEON_SYNC_TIP_LEVEL |
+ RADEON_YFLT_EN |
+ RADEON_UVFLT_EN |
+ (6 << RADEON_CY_FILT_BLEND_SHIFT));
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J) {
+ tv_modulator_cntl1 |= (0x46 << RADEON_SET_UP_LEVEL_SHIFT) |
+ (0x3b << RADEON_BLANK_LEVEL_SHIFT);
+ tv_modulator_cntl2 = (-111 & RADEON_TV_U_BURST_LEVEL_MASK) |
+ ((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+ } else if (tv_dac->tv_std == TV_STD_SCART_PAL) {
+ tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN;
+ tv_modulator_cntl2 = (0 & RADEON_TV_U_BURST_LEVEL_MASK) |
+ ((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+ } else {
+ tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN |
+ (0x3b << RADEON_SET_UP_LEVEL_SHIFT) |
+ (0x3b << RADEON_BLANK_LEVEL_SHIFT);
+ tv_modulator_cntl2 = (-78 & RADEON_TV_U_BURST_LEVEL_MASK) |
+ ((62 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+ }
+
+	tv_rgb_cntl = (RADEON_RGB_DITHER_EN |
+		       RADEON_TVOUT_SCALE_EN |
+		       (0x0b << RADEON_UVRAM_READ_MARGIN_SHIFT) |
+		       (0x07 << RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT) |
+		       RADEON_RGB_ATTEN_SEL(0x3) |
+		       RADEON_RGB_ATTEN_VAL(0xc));
+
+ if (radeon_crtc->crtc_id == 1)
+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC2;
+ else {
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_RMX;
+ else
+ tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC1;
+ }
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ vert_space = const_ptr->ver_total * 2 * 10000 / NTSC_TV_LINES_PER_FRAME;
+ else
+ vert_space = const_ptr->ver_total * 2 * 10000 / PAL_TV_LINES_PER_FRAME;
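+	/*
+	 * vert_space is the CRTC-to-TV vertical ratio scaled by 10000; e.g.
+	 * for the NTSC 27 MHz mode, 740 * 2 * 10000 / 525 = 28190, i.e. a
+	 * factor of about 2.819 (a worked example from the table above).
+	 */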
+
+ tmp = RREG32(RADEON_TV_VSCALER_CNTL1);
+ tmp &= 0xe3ff0000;
+ tmp |= (vert_space * (1 << FRAC_BITS) / 10000);
+ tv_vscaler_cntl1 = tmp;
+
+ if (pll_ref_freq == 2700)
+ tv_vscaler_cntl1 |= RADEON_RESTART_FIELD;
+
+ if (const_ptr->hor_resolution == 1024)
+ tv_vscaler_cntl1 |= (4 << RADEON_Y_DEL_W_SIG_SHIFT);
+ else
+ tv_vscaler_cntl1 |= (2 << RADEON_Y_DEL_W_SIG_SHIFT);
+
+ /* scale up for int divide */
+ tmp = const_ptr->ver_total * 2 * 1000;
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60) {
+ tmp /= NTSC_TV_LINES_PER_FRAME;
+ } else {
+ tmp /= PAL_TV_LINES_PER_FRAME;
+ }
+ flicker_removal = (tmp + 500) / 1000;
+
+ if (flicker_removal < 3)
+ flicker_removal = 3;
+ for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
+ if (flicker_removal == SLOPE_limit[i])
+ break;
+ }
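+	/*
+	 * With the four built-in modes this always evaluates to 3, matching
+	 * SLOPE_limit[3]; other values would leave i out of range, so the
+	 * table lookups below rely on an exact match (an observation, not
+	 * a behavioural change).
+	 */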
+
+ tv_y_saw_tooth_cntl = (vert_space * SLOPE_value[i] * (1 << (FRAC_BITS - 1)) +
+ 5001) / 10000 / 8 | ((SLOPE_value[i] *
+ (1 << (FRAC_BITS - 1)) / 8) << 16);
+ tv_y_fall_cntl =
+ (YCOEF_EN_value[i] << 17) | ((YCOEF_value[i] * (1 << 8) / 8) << 24) |
+ RADEON_Y_FALL_PING_PONG | (272 * SLOPE_value[i] / 8) * (1 << (FRAC_BITS - 1)) /
+ 1024;
+	tv_y_rise_cntl = RADEON_Y_RISE_PING_PONG |
+ (flicker_removal * 1024 - 272) * SLOPE_value[i] / 8 * (1 << (FRAC_BITS - 1)) / 1024;
+
+ tv_vscaler_cntl2 = RREG32(RADEON_TV_VSCALER_CNTL2) & 0x00fffff0;
+ tv_vscaler_cntl2 |= (0x10 << 24) |
+ RADEON_DITHER_MODE |
+ RADEON_Y_OUTPUT_DITHER_EN |
+ RADEON_UV_OUTPUT_DITHER_EN |
+ RADEON_UV_TO_BUF_DITHER_EN;
+
+ tmp = (tv_vscaler_cntl1 >> RADEON_UV_INC_SHIFT) & RADEON_UV_INC_MASK;
+ tmp = ((16384 * 256 * 10) / tmp + 5) / 10;
+ tmp = (tmp << RADEON_UV_OUTPUT_POST_SCALE_SHIFT) | 0x000b0000;
+ tv_dac->tv.timing_cntl = tmp;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60)
+ tv_dac_cntl = tv_dac->ntsc_tvdac_adj;
+ else
+ tv_dac_cntl = tv_dac->pal_tvdac_adj;
+
+ tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J)
+ tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+ else
+ tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J) {
+ if (pll_ref_freq == 2700) {
+ m = NTSC_TV_PLL_M_27;
+ n = NTSC_TV_PLL_N_27;
+ p = NTSC_TV_PLL_P_27;
+ } else {
+ m = NTSC_TV_PLL_M_14;
+ n = NTSC_TV_PLL_N_14;
+ p = NTSC_TV_PLL_P_14;
+ }
+ } else {
+ if (pll_ref_freq == 2700) {
+ m = PAL_TV_PLL_M_27;
+ n = PAL_TV_PLL_N_27;
+ p = PAL_TV_PLL_P_27;
+ } else {
+ m = PAL_TV_PLL_M_14;
+ n = PAL_TV_PLL_N_14;
+ p = PAL_TV_PLL_P_14;
+ }
+ }
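+	/*
+	 * These M/N/P values appear to target 12x the colour subcarrier,
+	 * assuming fout = ref * N / (M * P): 27 MHz * 175 / (22 * 5) =
+	 * 42.95 MHz = 12 * 3.579545 MHz for NTSC, and 27 MHz * 668 /
+	 * (113 * 3) = 53.20 MHz = 12 * 4.433619 MHz for PAL (an inference
+	 * from the constants, not documented here).
+	 */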
+
+ tv_pll_cntl = (m & RADEON_TV_M0LO_MASK) |
+ (((m >> 8) & RADEON_TV_M0HI_MASK) << RADEON_TV_M0HI_SHIFT) |
+ ((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) |
+ (((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
+ ((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
+
+ tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
+ ((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
+ ((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
+ RADEON_TVCLK_SRC_SEL_TVPLL |
+ RADEON_TVPLL_TEST_DIS);
+
+ tv_dac->tv.tv_uv_adr = 0xc8;
+
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M ||
+ tv_dac->tv_std == TV_STD_PAL_60) {
+ tv_ftotal = NTSC_TV_VFTOTAL;
+ hor_timing = hor_timing_NTSC;
+ vert_timing = vert_timing_NTSC;
+ } else {
+ hor_timing = hor_timing_PAL;
+ vert_timing = vert_timing_PAL;
+ tv_ftotal = PAL_TV_VFTOTAL;
+ }
+
+ for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
+ if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
+ break;
+ }
+
+ for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
+ if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
+ break;
+ }
+
+ radeon_legacy_tv_init_restarts(encoder);
+
+ /* play with DAC_CNTL */
+ /* play with GPIOPAD_A */
+ /* DISP_OUTPUT_CNTL */
+ /* use reference freq */
+
+ /* program the TV registers */
+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+ RADEON_CRT_ASYNC_RST | RADEON_TV_FIFO_ASYNC_RST));
+
+ tmp = RREG32(RADEON_TV_DAC_CNTL);
+ tmp &= ~RADEON_TV_DAC_NBLANK;
+ tmp |= RADEON_TV_DAC_BGSLEEP |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD;
+ WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+ /* TV PLL */
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+ WREG32_PLL(RADEON_TV_PLL_CNTL, tv_pll_cntl);
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVPLL_RESET, ~RADEON_TVPLL_RESET);
+
+ radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_RESET);
+
+ radeon_wait_pll_lock(encoder, 300, 160, 27);
+ radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~0xf);
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVCLK_SRC_SEL_TVPLL, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, (1 << RADEON_TVPDC_SHIFT), ~RADEON_TVPDC_MASK);
+ WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_SLEEP);
+
+ /* TV HV */
+ WREG32(RADEON_TV_RGB_CNTL, tv_rgb_cntl);
+ WREG32(RADEON_TV_HTOTAL, const_ptr->hor_total - 1);
+ WREG32(RADEON_TV_HDISP, const_ptr->hor_resolution - 1);
+ WREG32(RADEON_TV_HSTART, const_ptr->hor_start);
+
+ WREG32(RADEON_TV_VTOTAL, const_ptr->ver_total - 1);
+ WREG32(RADEON_TV_VDISP, const_ptr->ver_resolution - 1);
+ WREG32(RADEON_TV_FTOTAL, tv_ftotal);
+ WREG32(RADEON_TV_VSCALER_CNTL1, tv_vscaler_cntl1);
+ WREG32(RADEON_TV_VSCALER_CNTL2, tv_vscaler_cntl2);
+
+ WREG32(RADEON_TV_Y_FALL_CNTL, tv_y_fall_cntl);
+ WREG32(RADEON_TV_Y_RISE_CNTL, tv_y_rise_cntl);
+ WREG32(RADEON_TV_Y_SAW_TOOTH_CNTL, tv_y_saw_tooth_cntl);
+
+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+ RADEON_CRT_ASYNC_RST));
+
+ /* TV restarts */
+ radeon_legacy_write_tv_restarts(radeon_encoder);
+
+ /* tv timings */
+ radeon_restore_tv_timing_tables(radeon_encoder);
+
+ WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST));
+
+ /* tv std */
+ WREG32(RADEON_TV_SYNC_CNTL, (RADEON_SYNC_PUB | RADEON_TV_SYNC_IO_DRIVE));
+ WREG32(RADEON_TV_TIMING_CNTL, tv_dac->tv.timing_cntl);
+ WREG32(RADEON_TV_MODULATOR_CNTL1, tv_modulator_cntl1);
+ WREG32(RADEON_TV_MODULATOR_CNTL2, tv_modulator_cntl2);
+ WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, (RADEON_Y_RED_EN |
+ RADEON_C_GRN_EN |
+ RADEON_CMP_BLU_EN |
+ RADEON_DAC_DITHER_EN));
+
+ WREG32(RADEON_TV_CRC_CNTL, 0);
+
+ WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+
+ WREG32(RADEON_TV_GAIN_LIMIT_SETTINGS, ((0x17f << RADEON_UV_GAIN_LIMIT_SHIFT) |
+ (0x5ff << RADEON_Y_GAIN_LIMIT_SHIFT)));
+ WREG32(RADEON_TV_LINEAR_GAIN_SETTINGS, ((0x100 << RADEON_UV_GAIN_SHIFT) |
+ (0x100 << RADEON_Y_GAIN_SHIFT)));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+}
+
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+ uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+ uint32_t *v_total_disp, uint32_t *v_sync_strt_wid)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ const struct radeon_tv_mode_constants *const_ptr;
+ uint32_t tmp;
+
+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+ if (!const_ptr)
+ return;
+
+ *h_total_disp = (((const_ptr->hor_resolution / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) |
+ (((const_ptr->hor_total / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT);
+
+ tmp = *h_sync_strt_wid;
+ tmp &= ~(RADEON_CRTC_H_SYNC_STRT_PIX | RADEON_CRTC_H_SYNC_STRT_CHAR);
+ tmp |= (((const_ptr->hor_syncstart / 8) - 1) << RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT) |
+ (const_ptr->hor_syncstart & 7);
+ *h_sync_strt_wid = tmp;
+
+ *v_total_disp = ((const_ptr->ver_resolution - 1) << RADEON_CRTC_V_DISP_SHIFT) |
+ ((const_ptr->ver_total - 1) << RADEON_CRTC_V_TOTAL_SHIFT);
+
+ tmp = *v_sync_strt_wid;
+ tmp &= ~RADEON_CRTC_V_SYNC_STRT;
+ tmp |= ((const_ptr->ver_syncstart - 1) << RADEON_CRTC_V_SYNC_STRT_SHIFT);
+ *v_sync_strt_wid = tmp;
+}
+
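+/*
+ * Map a PLL post divider to its register encoding (apparently the same
+ * encoding the legacy CRTC PLL code uses); unsupported values fall back
+ * to the /16 encoding.
+ */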
+static int get_post_div(int value)
+{
+ int post_div;
+ switch (value) {
+ case 1: post_div = 0; break;
+ case 2: post_div = 1; break;
+ case 3: post_div = 4; break;
+ case 4: post_div = 2; break;
+ case 6: post_div = 6; break;
+ case 8: post_div = 3; break;
+ case 12: post_div = 7; break;
+ case 16:
+ default: post_div = 5; break;
+ }
+ return post_div;
+}
+
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+ uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+ uint32_t *ppll_div_3, uint32_t *pixclks_cntl)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ const struct radeon_tv_mode_constants *const_ptr;
+
+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+ if (!const_ptr)
+ return;
+
+ *htotal_cntl = (const_ptr->hor_total & 0x7) | RADEON_HTOT_CNTL_VGA_EN;
+
+ *ppll_ref_div = const_ptr->crtcPLL_M;
+
+ *ppll_div_3 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+ *pixclks_cntl &= ~(RADEON_PIX2CLK_SRC_SEL_MASK | RADEON_PIXCLK_TV_SRC_SEL);
+ *pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK;
+}
+
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+ uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+ uint32_t *p2pll_div_0, uint32_t *pixclks_cntl)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ const struct radeon_tv_mode_constants *const_ptr;
+
+ const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+ if (!const_ptr)
+ return;
+
+ *htotal2_cntl = (const_ptr->hor_total & 0x7);
+
+ *p2pll_ref_div = const_ptr->crtcPLL_M;
+
+ *p2pll_div_0 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+ *pixclks_cntl &= ~RADEON_PIX2CLK_SRC_SEL_MASK;
+ *pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK | RADEON_PIXCLK_TV_SRC_SEL;
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
new file mode 100644
index 0000000..d54d2d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -0,0 +1,302 @@
+/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* Very simple allocator for GART memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+ struct drm_file *file_priv)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock = kmalloc(sizeof(*newblock),
+ GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock = kmalloc(sizeof(*newblock),
+ GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+ out:
+ /* Our block is in the middle */
+ p->file_priv = file_priv;
+ return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+ int align2, struct drm_file *file_priv)
+{
+ struct mem_block *p;
+ int mask = (1 << align2) - 1;
+
+ list_for_each(p, heap) {
+ int start = (p->start + mask) & ~mask;
+ if (p->file_priv == NULL && start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+
+ return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ list_for_each(p, heap)
+ if (p->start == start)
+ return p;
+
+ return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+ p->file_priv = NULL;
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->file_priv == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+
+ if (p->prev->file_priv == NULL) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ kfree(p);
+ }
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+ struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+
+ if (!blocks)
+ return -ENOMEM;
+
+ *heap = kzalloc(sizeof(**heap), GFP_KERNEL);
+ if (!*heap) {
+ kfree(blocks);
+ return -ENOMEM;
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->file_priv = NULL;
+ blocks->next = blocks->prev = *heap;
+
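+	/*
+	 * The heap head is a sentinel: giving it an impossible owner keeps
+	 * the coalescing in free_block() and radeon_mem_release() from
+	 * merging real blocks across it (see the "special file_priv" notes).
+	 */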
+	(*heap)->file_priv = (struct drm_file *)-1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ list_for_each(p, heap) {
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
+ }
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ list_for_each(p, heap) {
+ while (p->file_priv == NULL && p->next->file_priv == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+ }
+}
+
+/* Shutdown.
+ */
+void radeon_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ kfree(q);
+ }
+
+ kfree(*heap);
+ *heap = NULL;
+}
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **get_heap(drm_radeon_private_t *dev_priv, int region)
+{
+ switch (region) {
+ case RADEON_MEM_REGION_GART:
+ return &dev_priv->gart_heap;
+ case RADEON_MEM_REGION_FB:
+ return &dev_priv->fb_heap;
+ default:
+ return NULL;
+ }
+}
+
+int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_alloc_t *alloc = data;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ heap = get_heap(dev_priv, alloc->region);
+ if (!heap || !*heap)
+ return -EFAULT;
+
+ /* Make things easier on ourselves: all allocations at least
+ * 4k aligned.
+ */
+ if (alloc->alignment < 12)
+ alloc->alignment = 12;
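+	/*
+	 * alignment is a log2 value: alloc_block() rounds each candidate
+	 * start with mask = (1 << align2) - 1, so 12 yields starts of the
+	 * form (start + 0xfff) & ~0xfff, i.e. 4 KiB boundaries.
+	 */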
+
+ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+ if (!block)
+ return -ENOMEM;
+
+ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+ sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_free_t *memfree = data;
+ struct mem_block *block, **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ heap = get_heap(dev_priv, memfree->region);
+ if (!heap || !*heap)
+ return -EFAULT;
+
+ block = find_block(*heap, memfree->region_offset);
+ if (!block)
+ return -EFAULT;
+
+ if (block->file_priv != file_priv)
+ return -EPERM;
+
+ free_block(block);
+ return 0;
+}
+
+int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_init_heap_t *initheap = data;
+ struct mem_block **heap;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ heap = get_heap(dev_priv, initheap->region);
+ if (!heap)
+ return -EFAULT;
+
+ if (*heap) {
+ DRM_ERROR("heap already initialized?");
+ return -EFAULT;
+ }
+
+ return init_heap(heap, initheap->start, initheap->size);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 0000000..69ad4fe
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,761 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ * Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct radeon_bo;
+struct radeon_device;
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+
+enum radeon_rmx_type {
+ RMX_OFF,
+ RMX_FULL,
+ RMX_CENTER,
+ RMX_ASPECT
+};
+
+enum radeon_tv_std {
+ TV_STD_NTSC,
+ TV_STD_PAL,
+ TV_STD_PAL_M,
+ TV_STD_PAL_60,
+ TV_STD_NTSC_J,
+ TV_STD_SCART_PAL,
+ TV_STD_SECAM,
+ TV_STD_PAL_CN,
+ TV_STD_PAL_N,
+};
+
+enum radeon_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
+enum radeon_hpd_id {
+ RADEON_HPD_1 = 0,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+ RADEON_HPD_NONE = 0xff,
+};
+
+#define RADEON_MAX_I2C_BUS 16
+
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ * grabs the gpio pins for software use
+ * 0=not held 1=held
+ * 2. "a" reg and bits
+ * output pin value
+ * 0=low 1=high
+ * 3. "en" reg and bits
+ * sets the pin direction
+ * 0=input 1=output
+ * 4. "y" reg and bits
+ * input pin value
+ * 0=low 1=high
+ */
+struct radeon_i2c_bus_rec {
+ bool valid;
+ /* id used by atom */
+ uint8_t i2c_id;
+ /* hpd id */
+ enum radeon_hpd_id hpd;
+ /* can be used with hw i2c engine */
+ bool hw_capable;
+ /* uses multi-media i2c engine */
+ bool mm_i2c;
+ /* regs and bits */
+ uint32_t mask_clk_reg;
+ uint32_t mask_data_reg;
+ uint32_t a_clk_reg;
+ uint32_t a_data_reg;
+ uint32_t en_clk_reg;
+ uint32_t en_data_reg;
+ uint32_t y_clk_reg;
+ uint32_t y_data_reg;
+ uint32_t mask_clk_mask;
+ uint32_t mask_data_mask;
+ uint32_t a_clk_mask;
+ uint32_t a_data_mask;
+ uint32_t en_clk_mask;
+ uint32_t en_data_mask;
+ uint32_t y_clk_mask;
+ uint32_t y_data_mask;
+};
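
Given the mask/a/en/y scheme described above, a bit-banged line is driven low by programming it as an output (en bit set, with the "a" output latch assumed pre-cleared to 0) and released high by switching it back to an input so the bus pull-up wins; the current level is sampled from the "y" register. A hedged sketch of that pattern, where reg_read()/reg_write() are hypothetical MMIO accessors, not driver API:

	#include <stdint.h>
	#include "radeon_mode.h"	/* for struct radeon_i2c_bus_rec */

	/* hypothetical MMIO accessors, for illustration only */
	extern uint32_t reg_read(uint32_t reg);
	extern void reg_write(uint32_t reg, uint32_t val);

	/* drive SCL low, or release it so the pull-up raises the line */
	static void scl_set(const struct radeon_i2c_bus_rec *rec, int high)
	{
		uint32_t en = reg_read(rec->en_clk_reg);

		if (high)
			en &= ~rec->en_clk_mask;	/* input: line floats high */
		else
			en |= rec->en_clk_mask;		/* output; "a" bit assumed 0 */
		reg_write(rec->en_clk_reg, en);
	}

	/* sample SDA from the "y" (input value) register */
	static int sda_get(const struct radeon_i2c_bus_rec *rec)
	{
		return (reg_read(rec->y_data_reg) & rec->y_data_mask) != 0;
	}
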
+
+struct radeon_tmds_pll {
+ uint32_t freq;
+ uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+/* pll flags */
+#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
+#define RADEON_PLL_USE_REF_DIV (1 << 2)
+#define RADEON_PLL_LEGACY (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
+#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV (1 << 12)
+#define RADEON_PLL_IS_LCD (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
+
+struct radeon_pll {
+ /* reference frequency */
+ uint32_t reference_freq;
+
+ /* fixed dividers */
+ uint32_t reference_div;
+ uint32_t post_div;
+
+ /* pll in/out limits */
+ uint32_t pll_in_min;
+ uint32_t pll_in_max;
+ uint32_t pll_out_min;
+ uint32_t pll_out_max;
+ uint32_t lcd_pll_out_min;
+ uint32_t lcd_pll_out_max;
+ uint32_t best_vco;
+
+ /* divider limits */
+ uint32_t min_ref_div;
+ uint32_t max_ref_div;
+ uint32_t min_post_div;
+ uint32_t max_post_div;
+ uint32_t min_feedback_div;
+ uint32_t max_feedback_div;
+ uint32_t min_frac_feedback_div;
+ uint32_t max_frac_feedback_div;
+
+ /* flags for the current clock */
+ uint32_t flags;
+
+ /* pll id */
+ uint32_t id;
+};
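
These dividers relate reference and output frequency as out = reference_freq * fb_div / (reference_div * post_div); when RADEON_PLL_USE_FRAC_FB_DIV is set, the compute helpers treat the fractional feedback divider as tenths. A standalone sketch of the arithmetic (frequencies assumed to be in the driver's usual 10 kHz units; verify against the radeon_compute_pll_* helpers):

	#include <stdio.h>

	/* out = ref * (fb + frac/10) / (ref_div * post_div) */
	static double pll_out(double ref, unsigned fb, unsigned frac,
			      unsigned ref_div, unsigned post_div)
	{
		return ref * ((double)fb + frac / 10.0) / (ref_div * post_div);
	}

	int main(void)
	{
		/* 27 MHz reference in 10 kHz units = 2700 */
		printf("%.1f x 10 kHz\n", pll_out(2700, 100, 5, 10, 2));
		return 0;
	}
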
+
+struct radeon_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_bit_data bit;
+ struct i2c_algo_dp_aux_data dp;
+ } algo;
+ struct radeon_i2c_bus_rec rec;
+};
+
+/* mostly for Macs, but really any system without connector tables */
+enum radeon_connector_table {
+ CT_NONE = 0,
+ CT_GENERIC,
+ CT_IBOOK,
+ CT_POWERBOOK_EXTERNAL,
+ CT_POWERBOOK_INTERNAL,
+ CT_POWERBOOK_VGA,
+ CT_MINI_EXTERNAL,
+ CT_MINI_INTERNAL,
+ CT_IMAC_G5_ISIGHT,
+ CT_EMAC,
+ CT_RN50_POWER,
+ CT_MAC_X800,
+ CT_MAC_G5_9600,
+ CT_SAM440EP,
+ CT_MAC_G4_SILVER
+};
+
+enum radeon_dvo_chip {
+ DVO_SIL164,
+ DVO_SIL1178,
+};
+
+struct radeon_fbdev;
+
+struct radeon_afmt {
+ bool enabled;
+ int offset;
+ bool last_buffer_filled_status;
+ int id;
+};
+
+struct radeon_mode_info {
+ struct atom_context *atom_context;
+ struct card_info *atom_card_info;
+ enum radeon_connector_table connector_table;
+ bool mode_config_initialized;
+ struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
+ /* DVI-I properties */
+ struct drm_property *coherent_mode_property;
+ /* DAC enable load detect */
+ struct drm_property *load_detect_property;
+ /* TV standard */
+ struct drm_property *tv_std_property;
+ /* legacy TMDS PLL detect */
+ struct drm_property *tmds_pll_property;
+ /* underscan */
+ struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
+ /* hardcoded DFP edid from BIOS */
+ struct edid *bios_hardcoded_edid;
+ int bios_hardcoded_edid_size;
+
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
+ /* firmware flags */
+ u16 firmware_flags;
+ /* pointer to backlight encoder */
+ struct radeon_encoder *bl_encoder;
+};
+
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+ struct radeon_encoder *encoder;
+ uint8_t negative;
+};
+
+#endif
+
+#define MAX_H_CODE_TIMING_LEN 32
+#define MAX_V_CODE_TIMING_LEN 32
+
+/* need to store these, since reading the code
+   tables back is too costly */
+struct radeon_tv_regs {
+ uint32_t tv_uv_adr;
+ uint32_t timing_cntl;
+ uint32_t hrestart;
+ uint32_t vrestart;
+ uint32_t frestart;
+ uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
+ uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
+};
+
+/* spread spectrum */
+struct radeon_atom_ss {
+ uint16_t percentage;
+ uint8_t type;
+ uint16_t step;
+ uint8_t delay;
+ uint8_t range;
+ uint8_t refdiv;
+ /* asic_ss */
+ uint16_t rate;
+ uint16_t amount;
+};
+
+struct radeon_crtc {
+ struct drm_crtc base;
+ int crtc_id;
+ u16 lut_r[256], lut_g[256], lut_b[256];
+ bool enabled;
+ bool can_tile;
+ uint32_t crtc_offset;
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width;
+ int cursor_height;
+ uint32_t legacy_display_base_addr;
+ uint32_t legacy_cursor_offset;
+ enum radeon_rmx_type rmx_type;
+ u8 h_border;
+ u8 v_border;
+ fixed20_12 vsc;
+ fixed20_12 hsc;
+ struct drm_display_mode native_mode;
+ int pll_id;
+ /* page flipping */
+ struct radeon_unpin_work *unpin_work;
+ int deferred_flip_completion;
+ /* pll sharing */
+ struct radeon_atom_ss ss;
+ bool ss_enabled;
+ u32 adjusted_clock;
+ int bpc;
+ u32 pll_reference_div;
+ u32 pll_post_div;
+ u32 pll_flags;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+};
+
+struct radeon_encoder_primary_dac {
+ /* legacy primary dac */
+ uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+ /* legacy lvds */
+ uint16_t panel_vcc_delay;
+ uint8_t panel_pwr_delay;
+ uint8_t panel_digon_delay;
+ uint8_t panel_blon_delay;
+ uint16_t panel_ref_divider;
+ uint8_t panel_post_divider;
+ uint16_t panel_fb_divider;
+ bool use_bios_dividers;
+ uint32_t lvds_gen_cntl;
+ /* panel mode */
+ struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
+};
+
+struct radeon_encoder_tv_dac {
+ /* legacy tv dac */
+ uint32_t ps2_tvdac_adj;
+ uint32_t ntsc_tvdac_adj;
+ uint32_t pal_tvdac_adj;
+
+ int h_pos;
+ int v_pos;
+ int h_size;
+ int supported_tv_stds;
+ bool tv_on;
+ enum radeon_tv_std tv_std;
+ struct radeon_tv_regs tv;
+};
+
+struct radeon_encoder_int_tmds {
+ /* legacy int tmds */
+ struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_ext_tmds {
+ /* tmds over dvo */
+ struct radeon_i2c_chan *i2c_bus;
+ uint8_t slave_addr;
+ enum radeon_dvo_chip dvo_chip;
+};
+
+struct radeon_encoder_atom_dig {
+ bool linkb;
+ /* atom dig */
+ bool coherent_mode;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+ /* atom lvds/edp */
+ uint32_t lcd_misc;
+ uint16_t panel_pwr_delay;
+ uint32_t lcd_ss_id;
+ /* panel mode */
+ struct drm_display_mode native_mode;
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
+ int panel_mode;
+ struct radeon_afmt *afmt;
+};
+
+struct radeon_encoder_atom_dac {
+ enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder {
+ struct drm_encoder base;
+ uint32_t encoder_enum;
+ uint32_t encoder_id;
+ uint32_t devices;
+ uint32_t active_device;
+ uint32_t flags;
+ uint32_t pixel_clock;
+ enum radeon_rmx_type rmx_type;
+ enum radeon_underscan_type underscan_type;
+ uint32_t underscan_hborder;
+ uint32_t underscan_vborder;
+ struct drm_display_mode native_mode;
+ void *enc_priv;
+ int audio_polling_active;
+ bool is_ext_encoder;
+ u16 caps;
+};
+
+struct radeon_connector_atom_dig {
+ uint32_t igp_lane_info;
+ /* displayport */
+ struct radeon_i2c_chan *dp_i2c_bus;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 dp_sink_type;
+ int dp_clock;
+ int dp_lane_count;
+ bool edp_on;
+};
+
+struct radeon_gpio_rec {
+ bool valid;
+ u8 id;
+ u32 reg;
+ u32 mask;
+};
+
+struct radeon_hpd {
+ enum radeon_hpd_id hpd;
+ u8 plugged_state;
+ struct radeon_gpio_rec gpio;
+};
+
+struct radeon_router {
+ u32 router_id;
+ struct radeon_i2c_bus_rec i2c_info;
+ u8 i2c_addr;
+ /* i2c mux */
+ bool ddc_valid;
+ u8 ddc_mux_type;
+ u8 ddc_mux_control_pin;
+ u8 ddc_mux_state;
+ /* clock/data mux */
+ bool cd_valid;
+ u8 cd_mux_type;
+ u8 cd_mux_control_pin;
+ u8 cd_mux_state;
+};
+
+struct radeon_connector {
+ struct drm_connector base;
+ uint32_t connector_id;
+ uint32_t devices;
+ struct radeon_i2c_chan *ddc_bus;
+ /* some systems have an hdmi and vga port with a shared ddc line */
+ bool shared_ddc;
+ bool use_digital;
+ /* we need to keep the EDID between detect and get_modes,
+    due to analog/digital/tvencoder switching */
+ struct edid *edid;
+ void *con_priv;
+ bool dac_load_detect;
+ bool detected_by_load; /* if the connection status was determined by load */
+ uint16_t connector_object_id;
+ struct radeon_hpd hpd;
+ struct radeon_router router;
+ struct radeon_i2c_chan *router_bus;
+};
+
+struct radeon_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+ ((em) == ATOM_ENCODER_MODE_DP_MST))
+
+struct atom_clock_dividers {
+ u32 post_div;
+ union {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 reserved : 6;
+ u32 whole_fb_div : 12;
+ u32 frac_fb_div : 14;
+#else
+ u32 frac_fb_div : 14;
+ u32 whole_fb_div : 12;
+ u32 reserved : 6;
+#endif
+ };
+ u32 fb_div;
+ };
+ u32 ref_div;
+ bool enable_post_div;
+ bool enable_dithen;
+ u32 vco_mode;
+ u32 real_clock;
+};
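
The anonymous struct/union overlay lets fb_div be accessed either whole or as separate fractional and integer fields, with the bitfield order flipped under __BIG_ENDIAN so the packed word matches what ATOM returns. A little-endian illustration (standalone; assumes LSB-first bitfield packing, which is the common ABI but formally implementation-defined):

	#include <stdio.h>
	#include <stdint.h>

	union fbdiv {
		struct {
			uint32_t frac_fb_div : 14;
			uint32_t whole_fb_div : 12;
			uint32_t reserved : 6;
		};
		uint32_t fb_div;
	};

	int main(void)
	{
		union fbdiv d = { .fb_div = 0 };

		d.whole_fb_div = 100;
		d.frac_fb_div = 5;
		printf("fb_div = 0x%08x\n", d.fb_div); /* whole part in bits 14..25 */
		return 0;
	}
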
+
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+ u32 pixel_clock);
+
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
+extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
+extern void radeon_dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ u8 write_byte, u8 *read_byte);
+
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus);
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id, u32 clock);
+
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ u32 freq,
+ u32 *dot_clock_p,
+ u32 *fb_div_p,
+ u32 *frac_fb_div_p,
+ u32 *ref_div_p,
+ u32 *post_div_p);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
+extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y,
+ enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic);
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev);
+extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+int radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+ struct radeon_crtc *radeon_crtc);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
+
+/* legacy tv */
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+ uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+ uint32_t *v_total_disp, uint32_t *v_sync_strt_wid);
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+ uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+ uint32_t *ppll_div_3, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+ uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+ uint32_t *p2pll_div_0, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 0000000..1424ccd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+
+/*
+ * To exclude mutual BO access we rely on bo_reserve exclusion, as all
+ * functions call it.
+ */
+
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+ /* remove from all vm address space */
+ radeon_vm_bo_rmv(bo->rdev, bo_va);
+ }
+}
+
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+ struct radeon_bo *bo;
+
+ bo = container_of(tbo, struct radeon_bo, tbo);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ radeon_bo_clear_va(bo);
+ drm_gem_object_release(&bo->gem_base);
+ kfree(bo);
+}
+
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &radeon_ttm_bo_destroy)
+ return true;
+ return false;
+}
+
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+ u32 c = 0;
+
+ rbo->placement.fpfn = 0;
+ rbo->placement.lpfn = 0;
+ rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
+ if (domain & RADEON_GEM_DOMAIN_VRAM)
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ if (domain & RADEON_GEM_DOMAIN_GTT) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+ }
+ }
+ if (domain & RADEON_GEM_DOMAIN_CPU) {
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ } else {
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+ }
+ }
+ if (!c)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ rbo->placement.num_placement = c;
+ rbo->placement.num_busy_placement = c;
+}
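
radeon_ttm_placement_from_domain() expands a domain bitmask into an ordered preference list, so asking for VRAM|GTT yields two placements and TTM can fall back to GTT when VRAM is full. A toy model of the expansion (local stand-in flags, not the real TTM constants):

	#include <stdio.h>

	#define DOM_VRAM (1 << 2)
	#define DOM_GTT  (1 << 1)
	#define DOM_CPU  (1 << 0)

	int main(void)
	{
		unsigned domain = DOM_VRAM | DOM_GTT;
		const char *placements[4];
		unsigned c = 0;

		if (domain & DOM_VRAM)
			placements[c++] = "VRAM (write-combined)";
		if (domain & DOM_GTT)
			placements[c++] = "GTT (cached)";
		if (domain & DOM_CPU)
			placements[c++] = "SYSTEM (cached)";
		if (!c)
			placements[c++] = "SYSTEM (any caching)";

		for (unsigned i = 0; i < c; i++)
			printf("%u: %s\n", i, placements[i]);
		return 0;
	}
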
+
+int radeon_bo_create(struct radeon_device *rdev,
+ unsigned long size, int byte_align, bool kernel, u32 domain,
+ struct sg_table *sg, struct radeon_bo **bo_ptr)
+{
+ struct radeon_bo *bo;
+ enum ttm_bo_type type;
+ unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ size_t acc_size;
+ int r;
+
+ size = ALIGN(size, PAGE_SIZE);
+
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+ if (kernel) {
+ type = ttm_bo_type_kernel;
+ } else if (sg) {
+ type = ttm_bo_type_sg;
+ } else {
+ type = ttm_bo_type_device;
+ }
+ *bo_ptr = NULL;
+
+ acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+ sizeof(struct radeon_bo));
+
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+ r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
+ bo->rdev = rdev;
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->va);
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocations are uninterruptible */
+ down_read(&rdev->pm.mclk_lock);
+ r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, page_align, !kernel, NULL,
+ acc_size, sg, &radeon_ttm_bo_destroy);
+ up_read(&rdev->pm.mclk_lock);
+ if (unlikely(r != 0)) {
+ return r;
+ }
+ *bo_ptr = bo;
+
+ trace_radeon_bo_create(bo);
+
+ return 0;
+}
+
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
+{
+ bool is_iomem;
+ int r;
+
+ if (bo->kptr) {
+ if (ptr) {
+ *ptr = bo->kptr;
+ }
+ return 0;
+ }
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ if (r) {
+ return r;
+ }
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ if (ptr) {
+ *ptr = bo->kptr;
+ }
+ radeon_bo_check_tiling(bo, 0, 0);
+ return 0;
+}
+
+void radeon_bo_kunmap(struct radeon_bo *bo)
+{
+ if (bo->kptr == NULL)
+ return;
+ bo->kptr = NULL;
+ radeon_bo_check_tiling(bo, 0, 0);
+ ttm_bo_kunmap(&bo->kmap);
+}
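
Together, create/reserve/pin/kmap give the usual buffer lifecycle. A hedged usage sketch built only from the signatures in this patch — the function name is hypothetical and error handling is abbreviated:

	/* sketch: allocate a 4 KiB kernel BO in VRAM, CPU-map it, clean up */
	static int radeon_bo_scratch_demo(struct radeon_device *rdev)
	{
		struct radeon_bo *bo;
		u64 gpu_addr;
		void *cpu_ptr;
		int r;

		r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
		if (r)
			return r;
		r = radeon_bo_reserve(bo, false);
		if (r == 0) {
			r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
			if (r == 0) {
				if (radeon_bo_kmap(bo, &cpu_ptr) == 0) {
					memset(cpu_ptr, 0, 4096);
					radeon_bo_kunmap(bo);
				}
				radeon_bo_unpin(bo);
			}
			radeon_bo_unreserve(bo);
		}
		radeon_bo_unref(&bo);
		return r;
	}
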
+
+void radeon_bo_unref(struct radeon_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+ struct radeon_device *rdev;
+
+ if ((*bo) == NULL)
+ return;
+ rdev = (*bo)->rdev;
+ tbo = &((*bo)->tbo);
+ down_read(&rdev->pm.mclk_lock);
+ ttm_bo_unref(&tbo);
+ up_read(&rdev->pm.mclk_lock);
+ if (tbo == NULL)
+ *bo = NULL;
+}
+
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+ u64 *gpu_addr)
+{
+ int r, i;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
+
+ if (max_offset != 0) {
+ u64 domain_start;
+
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain_start = bo->rdev->mc.vram_start;
+ else
+ domain_start = bo->rdev->mc.gtt_start;
+ WARN_ON_ONCE(max_offset <
+ (radeon_bo_gpu_offset(bo) - domain_start));
+ }
+
+ return 0;
+ }
+ radeon_ttm_placement_from_domain(bo, domain);
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ }
+ if (max_offset) {
+ u64 lpfn = max_offset >> PAGE_SHIFT;
+
+ if (!bo->placement.lpfn)
+ bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+
+ if (lpfn < bo->placement.lpfn)
+ bo->placement.lpfn = lpfn;
+ }
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
+ }
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ return r;
+}
+
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+ return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
+
+int radeon_bo_unpin(struct radeon_bo *bo)
+{
+ int r, i;
+
+ if (!bo->pin_count) {
+ dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ return r;
+}
+
+int radeon_bo_evict_vram(struct radeon_device *rdev)
+{
+ /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
+ if (0 && (rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->mc.igp_sideport_enabled == false)
+ /* Useless to evict on IGP chips */
+ return 0;
+ }
+ return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_bo_force_delete(struct radeon_device *rdev)
+{
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects)) {
+ return;
+ }
+ dev_err(rdev->dev, "Userspace still has active objects !\n");
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ mutex_lock(&rdev->ddev->struct_mutex);
+ dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ }
+}
+
+int radeon_bo_init(struct radeon_device *rdev)
+{
+ /* Add an MTRR for the VRAM */
+ if (!rdev->fastfb_working) {
+ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+ MTRR_TYPE_WRCOMB, 1);
+ }
+ DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+ rdev->mc.mc_vram_size >> 20,
+ (unsigned long long)rdev->mc.aper_size >> 20);
+ DRM_INFO("RAM width %dbits %cDR\n",
+ rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+ return radeon_ttm_init(rdev);
+}
+
+void radeon_bo_fini(struct radeon_device *rdev)
+{
+ radeon_ttm_fini(rdev);
+}
+
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
+{
+ if (lobj->written) {
+ list_add(&lobj->tv.head, head);
+ } else {
+ list_add_tail(&lobj->tv.head, head);
+ }
+}
+
+int radeon_bo_list_validate(struct list_head *head, int ring)
+{
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ u32 domain;
+ int r;
+
+ r = ttm_eu_reserve_buffers(head);
+ if (unlikely(r != 0)) {
+ return r;
+ }
+ list_for_each_entry(lobj, head, tv.head) {
+ bo = lobj->bo;
+ if (!bo->pin_count) {
+ domain = lobj->domain;
+
+ retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ if (ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
+ if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
+ domain = lobj->alt_domain;
+ goto retry;
+ }
+ return r;
+ }
+ }
+ lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ lobj->tiling_flags = bo->tiling_flags;
+ }
+ return 0;
+}
+
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma)
+{
+ return ttm_fbdev_mmap(vma, &bo->tbo);
+}
+
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+{
+ struct radeon_device *rdev = bo->rdev;
+ struct radeon_surface_reg *reg;
+ struct radeon_bo *old_object;
+ int steal;
+ int i;
+
+ BUG_ON(!radeon_bo_is_reserved(bo));
+
+ if (!bo->tiling_flags)
+ return 0;
+
+ if (bo->surface_reg >= 0) {
+ reg = &rdev->surface_regs[bo->surface_reg];
+ i = bo->surface_reg;
+ goto out;
+ }
+
+ steal = -1;
+ for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+ reg = &rdev->surface_regs[i];
+ if (!reg->bo)
+ break;
+
+ old_object = reg->bo;
+ if (old_object->pin_count == 0)
+ steal = i;
+ }
+
+ /* if we are all out */
+ if (i == RADEON_GEM_MAX_SURFACES) {
+ if (steal == -1)
+ return -ENOMEM;
+ /* find someone with a surface reg and nuke their BO */
+ reg = &rdev->surface_regs[steal];
+ old_object = reg->bo;
+ /* blow away the mapping */
+ DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+ ttm_bo_unmap_virtual(&old_object->tbo);
+ old_object->surface_reg = -1;
+ i = steal;
+ }
+
+ bo->surface_reg = i;
+ reg->bo = bo;
+
+out:
+ radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+ bo->tbo.mem.start << PAGE_SHIFT,
+ bo->tbo.num_pages << PAGE_SHIFT);
+ return 0;
+}
+
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+{
+ struct radeon_device *rdev = bo->rdev;
+ struct radeon_surface_reg *reg;
+
+ if (bo->surface_reg == -1)
+ return;
+
+ reg = &rdev->surface_regs[bo->surface_reg];
+ radeon_clear_surface_reg(rdev, bo->surface_reg);
+
+ reg->bo = NULL;
+ bo->surface_reg = -1;
+}
+
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
+{
+ struct radeon_device *rdev = bo->rdev;
+ int r;
+
+ if (rdev->family >= CHIP_CEDAR) {
+ unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
+
+ bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+ bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+ mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+ stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+ switch (bankw) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (bankh) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (mtaspect) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (tilesplit > 6) {
+ return -EINVAL;
+ }
+ if (stilesplit > 6) {
+ return -EINVAL;
+ }
+ }
+ r = radeon_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+ bo->tiling_flags = tiling_flags;
+ bo->pitch = pitch;
+ radeon_bo_unreserve(bo);
+ return 0;
+}
+
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ uint32_t *tiling_flags,
+ uint32_t *pitch)
+{
+ BUG_ON(!radeon_bo_is_reserved(bo));
+ if (tiling_flags)
+ *tiling_flags = bo->tiling_flags;
+ if (pitch)
+ *pitch = bo->pitch;
+}
+
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop)
+{
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
+
+ if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+ return 0;
+
+ if (force_drop) {
+ radeon_bo_clear_surface_reg(bo);
+ return 0;
+ }
+
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+ if (!has_moved)
+ return 0;
+
+ if (bo->surface_reg >= 0)
+ radeon_bo_clear_surface_reg(bo);
+ return 0;
+ }
+
+ if ((bo->surface_reg >= 0) && !has_moved)
+ return 0;
+
+ return radeon_bo_get_surface_reg(bo);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 1);
+ radeon_vm_bo_invalidate(rbo->rdev, rbo);
+}
+
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ struct radeon_device *rdev;
+ struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return 0;
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* the memory is not visible: move it into visible VRAM */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0))
+ return r;
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_intr: don't return -ERESTARTSYS on pending signal
+ *
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 0000000..2943823
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return RADEON_GEM_DOMAIN_VRAM;
+ case TTM_PL_TT:
+ return RADEON_GEM_DOMAIN_GTT;
+ case TTM_PL_SYSTEM:
+ return RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function, it might be useful to add check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return ttm_bo_is_reserved(&bo->tbo);
+}
+
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+ return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+ return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
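
All of these size helpers are shift arithmetic on page counts. Assuming 4 KiB CPU pages and a 4 KiB RADEON_GPU_PAGE_SIZE (its value in radeon.h), a standalone check of the math:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define GPU_PAGE_SIZE 4096	/* assumed RADEON_GPU_PAGE_SIZE */

	int main(void)
	{
		unsigned long num_pages = 5;
		unsigned long bytes = num_pages << PAGE_SHIFT;

		printf("%lu pages = %lu bytes = %lu GPU pages\n",
		       num_pages, bytes, bytes / GPU_PAGE_SIZE);
		return 0;
	}
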
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init thus isn't protected
+ * by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait);
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct sg_table *sg,
+ struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+ u64 max_offset, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, int ring);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+
+/*
+ * sub allocation
+ */
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
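
A sub-allocation is addressed by adding its start offset (soffset) to the backing manager's bases, so the GPU and CPU views of one sa_bo always stay in lockstep. Modelled standalone:

	#include <stdio.h>
	#include <stdint.h>

	struct sa_manager { uint64_t gpu_addr; unsigned char *cpu_ptr; };
	struct sa_bo { struct sa_manager *manager; unsigned soffset; };

	int main(void)
	{
		unsigned char pool[4096];
		struct sa_manager mgr = { .gpu_addr = 0x100000, .cpu_ptr = pool };
		struct sa_bo bo = { .manager = &mgr, .soffset = 256 };

		printf("gpu: 0x%llx cpu: %p\n",
		       (unsigned long long)(bo.manager->gpu_addr + bo.soffset),
		       (void *)(bo.manager->cpu_ptr + bo.soffset));
		return 0;
	}
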
+
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 align, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m);
+#endif
+
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 0000000..788c64c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,874 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ * Alex Deucher <alexdeucher@gmail.com>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "avivod.h"
+#include "atom.h"
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static const char *radeon_pm_state_type_name[5] = {
+ "",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
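
radeon_pm_get_type_index() returns the index of the instance-th power state of the requested type and falls back to the default index when no match exists. The same scan, modelled standalone:

	#include <stdio.h>

	enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE };

	int main(void)
	{
		enum ps_type states[] = { PS_BALANCED, PS_PERFORMANCE,
					  PS_BATTERY, PS_PERFORMANCE };
		int default_index = 0, instance = 1, found = -1;

		for (int i = 0; i < 4; i++) {
			if (states[i] == PS_PERFORMANCE && ++found == instance) {
				printf("index %d\n", i); /* second match: 3 */
				return 0;
			}
		}
		printf("default %d\n", default_index);
		return 0;
	}
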
+
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
+{
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+ }
+}
+
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+ switch (rdev->pm.profile) {
+ case PM_PROFILE_DEFAULT:
+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+ break;
+ case PM_PROFILE_AUTO:
+ if (power_supply_is_system_supplied() > 0) {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+ } else {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ }
+ break;
+ case PM_PROFILE_LOW:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+ break;
+ case PM_PROFILE_MID:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+ break;
+ case PM_PROFILE_HIGH:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+ break;
+ }
+
+ if (rdev->pm.active_crtc_count == 0) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+ } else {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+ }
+}
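
The AUTO branch above reduces to a small decision table: AC power selects the HIGH profile pair, battery the MID pair, and the single-head (SH) versus multi-head (MH) variant follows active_crtc_count. As a standalone lookup:

	#include <stdio.h>

	int main(void)
	{
		const char *table[2][2] = {
			/*           one crtc    multiple crtcs */
			/* batt */ { "MID_SH",   "MID_MH"  },
			/* AC   */ { "HIGH_SH",  "HIGH_MH" },
		};
		int on_ac = 1, active_crtcs = 2;

		printf("auto profile: %s\n", table[on_ac][active_crtcs > 1]);
		return 0;
	}
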
+
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects))
+ return;
+
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ ttm_bo_unmap_virtual(&bo->tbo);
+ }
+}
+
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+ if (rdev->pm.active_crtcs) {
+ rdev->pm.vblank_sync = false;
+ wait_event_timeout(
+ rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+ msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ }
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+ u32 sclk, mclk;
+ bool misc_after = false;
+
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
+ if (radeon_gui_idle(rdev)) {
+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk;
+ if (sclk > rdev->pm.default_sclk)
+ sclk = rdev->pm.default_sclk;
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. The difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+ else
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
+ if (mclk > rdev->pm.default_mclk)
+ mclk = rdev->pm.default_mclk;
+
+ /* upvolt before raising clocks, downvolt after lowering clocks */
+ if (sclk < rdev->pm.current_sclk)
+ misc_after = true;
+
+ radeon_sync_with_vblank(rdev);
+
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (!radeon_pm_in_vbl(rdev))
+ return;
+ }
+
+ radeon_pm_prepare(rdev);
+
+ if (!misc_after)
+ /* voltage, pcie lanes, etc. */
+ radeon_pm_misc(rdev);
+
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
+ }
+
+ /* set memory clock */
+ if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
+ }
+
+ if (misc_after)
+ /* voltage, pcie lanes, etc. */
+ radeon_pm_misc(rdev);
+
+ radeon_pm_finish(rdev);
+
+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+ } else
+ DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
+}
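
misc_after encodes a safety ordering: voltage is raised (via radeon_pm_misc()) before clocks go up, and lowered only after clocks have come down, so the chip is never clocked beyond what its current voltage supports. The decision in isolation:

	#include <stdio.h>
	#include <stdbool.h>

	int main(void)
	{
		unsigned current_sclk = 600, target_sclk = 400;
		bool misc_after = target_sclk < current_sclk; /* downclocking */

		printf("voltage change %s clock change\n",
		       misc_after ? "after" : "before");
		return 0;
	}
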
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ int i, r;
+
+ /* no need to take locks, etc. if nothing's going to change */
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ down_write(&rdev->pm.mclk_lock);
+ mutex_lock(&rdev->ring_lock);
+
+ /* wait for the rings to drain */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ struct radeon_ring *ring = &rdev->ring[i];
+ if (!ring->ready) {
+ continue;
+ }
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* needs a GPU reset; don't reset it here */
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return;
+ }
+ }
+
+ radeon_unmap_vram_bos(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.active_crtcs & (1 << i)) {
+ rdev->pm.req_vblank |= (1 << i);
+ drm_vblank_get(rdev->ddev, i);
+ }
+ }
+ }
+
+ radeon_set_power_state(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.req_vblank & (1 << i)) {
+ rdev->pm.req_vblank &= ~(1 << i);
+ drm_vblank_put(rdev->ddev, i);
+ }
+ }
+ }
+
+ /* update display watermarks based on new power state */
+ radeon_update_bandwidth_info(rdev);
+ if (rdev->pm.active_crtc_count)
+ radeon_bandwidth_update(rdev);
+
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
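+/* Dump the power state table parsed from the video BIOS to the debug
+ * log: one entry per state with its type, flags and the engine/memory
+ * clocks and voltage of each clock mode.
+ */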
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+ int i, j;
+ struct radeon_power_state *power_state;
+ struct radeon_pm_clock_info *clock_info;
+
+ DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ power_state = &rdev->pm.power_state[i];
+ DRM_DEBUG_DRIVER("State %d: %s\n", i,
+ radeon_pm_state_type_name[power_state->type]);
+ if (i == rdev->pm.default_power_state_index)
+ DRM_DEBUG_DRIVER("\tDefault");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ DRM_DEBUG_DRIVER("\tSingle display only\n");
+ DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ for (j = 0; j < power_state->num_clock_modes; j++) {
+ clock_info = &(power_state->clock_info[j]);
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+ j,
+ clock_info->sclk * 10);
+ else
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+ j,
+ clock_info->sclk * 10,
+ clock_info->mclk * 10,
+ clock_info->voltage.voltage);
+ }
+ }
+}
+
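+/* sysfs show callback for the power_profile attribute: prints the
+ * currently selected profile as a string.
+ */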
+static ssize_t radeon_get_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int cp = rdev->pm.profile;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (cp == PM_PROFILE_AUTO) ? "auto" :
+ (cp == PM_PROFILE_LOW) ? "low" :
+ (cp == PM_PROFILE_MID) ? "mid" :
+ (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
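+/* sysfs store callback for the power_profile attribute. Accepts
+ * "default", "auto", "low", "mid" or "high"; returns -EINVAL for
+ * anything else or when the profile method is not active.
+ */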
+static ssize_t radeon_set_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (strncmp("default", buf, strlen("default")) == 0)
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ else if (strncmp("auto", buf, strlen("auto")) == 0)
+ rdev->pm.profile = PM_PROFILE_AUTO;
+ else if (strncmp("low", buf, strlen("low")) == 0)
+ rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("mid", buf, strlen("mid")) == 0)
+ rdev->pm.profile = PM_PROFILE_MID;
+ else if (strncmp("high", buf, strlen("high")) == 0)
+ rdev->pm.profile = PM_PROFILE_HIGH;
+ else {
+ count = -EINVAL;
+ goto fail;
+ }
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else
+ count = -EINVAL;
+
+fail:
+ mutex_unlock(&rdev->pm.mutex);
+
+ return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int pm = rdev->pm.pm_method;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
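+/* sysfs store callback for the power_method attribute. "dynpm" pauses
+ * the dynamic state machine with a default-reclock action pending;
+ * "profile" disables dynpm and cancels the idle worker. Both paths
+ * end in radeon_pm_compute_clocks() to apply the change.
+ */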
+static ssize_t radeon_set_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_DYNPM;
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ /* disable dynpm */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ mutex_unlock(&rdev->pm.mutex);
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+ } else {
+ count = -EINVAL;
+ goto fail;
+ }
+ radeon_pm_compute_clocks(rdev);
+fail:
+ return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+
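+/* hwmon temp1_input callback: read the internal thermal sensor that
+ * matches the ASIC family; unknown sensor types report 0.
+ */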
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int temp;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ temp = rv6xx_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_RV770:
+ temp = rv770_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
+ temp = evergreen_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_SUMO:
+ temp = sumo_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_SI:
+ temp = si_get_temp(rdev);
+ break;
+ default:
+ temp = 0;
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "radeon\n");
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+};
+
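+/* Register a hwmon device and its sysfs attributes on ASICs with a
+ * supported internal thermal sensor; a no-op otherwise.
+ */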
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+ int err = 0;
+
+ rdev->pm.int_hwmon_dev = NULL;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ case THERMAL_TYPE_RV770:
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
+ case THERMAL_TYPE_SUMO:
+ case THERMAL_TYPE_SI:
+ /* No support for TN yet */
+ if (rdev->family == CHIP_ARUBA)
+ return err;
+ rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+ if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+ err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ dev_err(rdev->dev,
+ "Unable to register hwmon device: %d\n", err);
+ break;
+ }
+ dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+ err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (err) {
+ dev_err(rdev->dev,
+ "Unable to create hwmon sysfs file: %d\n", err);
+ hwmon_device_unregister(rdev->dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.int_hwmon_dev) {
+ sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
+ hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+ }
+}
+
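+/* Pause dynamic power management across suspend and make sure the
+ * dynpm idle worker is not left running.
+ */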
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+ rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+}
+
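+/* Restore the default clocks and voltages on resume, reset the cached
+ * power state bookkeeping (asic init reverts the hardware to its
+ * defaults) and restart dynpm if it was active before suspend.
+ */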
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+ /* set up the default clocks if the MC ucode is loaded */
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
+ /* asic init will reset the default power state */
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	if ((rdev->pm.pm_method == PM_METHOD_DYNPM) &&
+	    (rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED)) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ radeon_pm_compute_clocks(rdev);
+}
+
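+/* One-time PM setup: default to the profile method, parse the power
+ * state tables from the BIOS, program the default clocks and voltages
+ * where the MC ucode allows it, register the internal thermal sensor,
+ * and expose the sysfs/debugfs interfaces when more than one power
+ * state is available.
+ */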
+int radeon_pm_init(struct radeon_device *rdev)
+{
+ int ret;
+
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.default_sclk = rdev->clock.default_sclk;
+ rdev->pm.default_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios)
+ radeon_atombios_get_power_modes(rdev);
+ else
+ radeon_combios_get_power_modes(rdev);
+ radeon_pm_print_states(rdev);
+ radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
+ }
+
+ /* set up the internal thermal sensor if applicable */
+ ret = radeon_hwmon_init(rdev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
+ if (rdev->pm.num_power_states > 1) {
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
+
+ DRM_INFO("radeon: power management initialized\n");
+ }
+
+ return 0;
+}
+
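+/* Tear down PM: return the GPU to its default clocks, remove the
+ * sysfs attributes, free the power state table and unregister hwmon.
+ */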
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states > 1) {
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ /* reset default clocks */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_set_clocks(rdev);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+
+ device_remove_file(rdev->dev, &dev_attr_power_profile);
+ device_remove_file(rdev->dev, &dev_attr_power_method);
+ }
+
+	kfree(rdev->pm.power_state);
+
+ radeon_hwmon_fini(rdev);
+}
+
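+/* Re-evaluate the power state after a change in display topology.
+ * Recounts the active crtcs and either reapplies the current profile
+ * or drives the dynpm state machine: pause on multi-head, activate on
+ * single-head, drop to minimum clocks when no display is active.
+ */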
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+
+ if (rdev->pm.num_power_states < 2)
+ return;
+
+ mutex_lock(&rdev->pm.mutex);
+
+ rdev->pm.active_crtcs = 0;
+ rdev->pm.active_crtc_count = 0;
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.active_crtc_count++;
+ }
+ }
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+ if (rdev->pm.active_crtc_count > 1) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
+ }
+ } else if (rdev->pm.active_crtc_count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
+ }
+ } else { /* count == 0 */
+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+ }
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
+{
+ int crtc, vpos, hpos, vbl_status;
+ bool in_vbl = true;
+
+	/* Iterate over all active crtcs. All crtcs must be in vblank,
+	 * otherwise return in_vbl == false.
+	 */
+ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+ if (rdev->pm.active_crtcs & (1 << crtc)) {
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+ !(vbl_status & DRM_SCANOUTPOS_INVBL))
+ in_vbl = false;
+ }
+ }
+
+ return in_vbl;
+}
+
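+/* Like radeon_pm_in_vbl(), but logs a debug message when a reclock
+ * happens outside of the vblank interval.
+ */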
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+ u32 stat_crtc = 0;
+ bool in_vbl = radeon_pm_in_vbl(rdev);
+
+	if (!in_vbl)
+ DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
+ finish ? "exit" : "entry");
+ return in_vbl;
+}
+
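+/* dynpm idle worker: runs every RADEON_IDLE_LOOP_MS while active,
+ * samples the number of fences still pending on the rings and plans
+ * an upclock when the GPU is busy (>= 3 pending) or a downclock when
+ * it is idle, applying the planned action once its timeout expires.
+ */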
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev;
+ int resched;
+ rdev = container_of(work, struct radeon_device,
+ pm.dynpm_idle_work.work);
+
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ int not_processed = 0;
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (ring->ready) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
+ }
+ }
+
+ if (not_processed >= 3) { /* should upclock */
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_upclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_UPCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ } else if (not_processed == 0) { /* should downclock */
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_downclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_DOWNCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ }
+
+		/* Note: the planned reclock is applied only after the action
+		 * timeout expires; radeon_set_power_state() then waits for
+		 * vblank to avoid flicker.
+		 */
+		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+		    time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ }
+ mutex_unlock(&rdev->pm.mutex);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
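+	/* clocks are stored in units of 10 kHz, hence the literal
+	 * trailing 0 in the format strings below
+	 */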
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+ if (rdev->asic->pm.get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->pm.current_vddc)
+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+ if (rdev->asic->pm.get_pcie_lanes)
+ seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+
+ return 0;
+}
+
+static struct drm_info_list radeon_pm_info_list[] = {
+ {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 0000000..4940af7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int npages = bo->tbo.num_pages;
+
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int ret;
+
+ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+ &bo->dma_buf_vmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return bo->dma_buf_vmap.virtual;
+}
+
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *bo;
+ int ret;
+
+ ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_GTT, sg, &bo);
+ if (ret)
+ return ERR_PTR(ret);
+ bo->gem_base.driver_private = bo;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
+ return &bo->gem_base;
+}
+
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int ret = 0;
+
+ ret = radeon_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ /* pin buffer into GTT */
+ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (ret) {
+ radeon_bo_unreserve(bo);
+ return ret;
+ }
+ radeon_bo_unreserve(bo);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 0000000..7e2c2b7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3724 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ * VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@xfree86.org>
+ * Rickard E. Faith <faith@valinux.com>
+ * Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ * 1999.
+ *
+ * !!!! FIXME !!!!
+ * RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+#include "evergreen_reg.h"
+#include "ni_reg.h"
+#include "si_reg.h"
+
+#define RADEON_MC_AGP_LOCATION 0x014c
+#define RADEON_MC_AGP_START_MASK 0x0000FFFF
+#define RADEON_MC_AGP_START_SHIFT 0
+#define RADEON_MC_AGP_TOP_MASK 0xFFFF0000
+#define RADEON_MC_AGP_TOP_SHIFT 16
+#define RADEON_MC_FB_LOCATION 0x0148
+#define RADEON_MC_FB_START_MASK 0x0000FFFF
+#define RADEON_MC_FB_START_SHIFT 0
+#define RADEON_MC_FB_TOP_MASK 0xFFFF0000
+#define RADEON_MC_FB_TOP_SHIFT 16
+#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
+#define RADEON_AGP_BASE 0x0170
+
+#define ATI_DATATYPE_VQ 0
+#define ATI_DATATYPE_CI4 1
+#define ATI_DATATYPE_CI8 2
+#define ATI_DATATYPE_ARGB1555 3
+#define ATI_DATATYPE_RGB565 4
+#define ATI_DATATYPE_RGB888 5
+#define ATI_DATATYPE_ARGB8888 6
+#define ATI_DATATYPE_RGB332 7
+#define ATI_DATATYPE_Y8 8
+#define ATI_DATATYPE_RGB8 9
+#define ATI_DATATYPE_CI16 10
+#define ATI_DATATYPE_VYUY_422 11
+#define ATI_DATATYPE_YVYU_422 12
+#define ATI_DATATYPE_AYUV_444 14
+#define ATI_DATATYPE_ARGB4444 15
+
+ /* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID 0x0f2c /* PCI */
+#define RADEON_AGP_BASE 0x0170
+#define RADEON_AGP_CNTL 0x0174
+# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0)
+# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0)
+# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0)
+# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0)
+# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0)
+# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0)
+# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0)
+# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0)
+#define RADEON_STATUS_PCI_CONFIG 0x06
+# define RADEON_CAP_LIST 0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/
+# define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */
+# define RADEON_CAP_ID_NULL 0x00 /* End of capability list */
+# define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */
+# define RADEON_CAP_ID_EXP 0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND 0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
+# define RADEON_AGP_ENABLE (1<<8)
+#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */
+#define RADEON_AGP_STATUS 0x0f5c /* PCI */
+# define RADEON_AGP_1X_MODE 0x01
+# define RADEON_AGP_2X_MODE 0x02
+# define RADEON_AGP_4X_MODE 0x04
+# define RADEON_AGP_FW_MODE 0x10
+# define RADEON_AGP_MODE_MASK 0x17
+# define RADEON_AGPv3_MODE 0x08
+# define RADEON_AGPv3_4X_MODE 0x01
+# define RADEON_AGPv3_8X_MODE 0x02
+#define RADEON_ATTRDR 0x03c1 /* VGA */
+#define RADEON_ATTRDW 0x03c0 /* VGA */
+#define RADEON_ATTRX 0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL 0x1660
+# define RADEON_AUX1_SC_EN (1 << 0)
+# define RADEON_AUX1_SC_MODE_OR (0 << 1)
+# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
+# define RADEON_AUX2_SC_EN (1 << 2)
+# define RADEON_AUX2_SC_MODE_OR (0 << 3)
+# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
+# define RADEON_AUX3_SC_EN (1 << 4)
+# define RADEON_AUX3_SC_MODE_OR (0 << 5)
+# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM 0x1670
+#define RADEON_AUX1_SC_LEFT 0x1664
+#define RADEON_AUX1_SC_RIGHT 0x1668
+#define RADEON_AUX1_SC_TOP 0x166c
+#define RADEON_AUX2_SC_BOTTOM 0x1680
+#define RADEON_AUX2_SC_LEFT 0x1674
+#define RADEON_AUX2_SC_RIGHT 0x1678
+#define RADEON_AUX2_SC_TOP 0x167c
+#define RADEON_AUX3_SC_BOTTOM 0x1690
+#define RADEON_AUX3_SC_LEFT 0x1684
+#define RADEON_AUX3_SC_RIGHT 0x1688
+#define RADEON_AUX3_SC_TOP 0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc
+
+#define RADEON_BASE_CODE 0x0f0b
+#define RADEON_BIOS_0_SCRATCH 0x0010
+# define RADEON_FP_PANEL_SCALABLE (1 << 16)
+# define RADEON_FP_PANEL_SCALE_EN (1 << 17)
+# define RADEON_FP_CHIP_SCALE_EN (1 << 18)
+# define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26)
+# define RADEON_DISPLAY_ROT_MASK (3 << 28)
+# define RADEON_DISPLAY_ROT_00 (0 << 28)
+# define RADEON_DISPLAY_ROT_90 (1 << 28)
+# define RADEON_DISPLAY_ROT_180 (2 << 28)
+# define RADEON_DISPLAY_ROT_270 (3 << 28)
+#define RADEON_BIOS_1_SCRATCH 0x0014
+#define RADEON_BIOS_2_SCRATCH 0x0018
+#define RADEON_BIOS_3_SCRATCH 0x001c
+#define RADEON_BIOS_4_SCRATCH 0x0020
+# define RADEON_CRT1_ATTACHED_MASK (3 << 0)
+# define RADEON_CRT1_ATTACHED_MONO (1 << 0)
+# define RADEON_CRT1_ATTACHED_COLOR (2 << 0)
+# define RADEON_LCD1_ATTACHED (1 << 2)
+# define RADEON_DFP1_ATTACHED (1 << 3)
+# define RADEON_TV1_ATTACHED_MASK (3 << 4)
+# define RADEON_TV1_ATTACHED_COMP (1 << 4)
+# define RADEON_TV1_ATTACHED_SVIDEO (2 << 4)
+# define RADEON_CRT2_ATTACHED_MASK (3 << 8)
+# define RADEON_CRT2_ATTACHED_MONO (1 << 8)
+# define RADEON_CRT2_ATTACHED_COLOR (2 << 8)
+# define RADEON_DFP2_ATTACHED (1 << 11)
+#define RADEON_BIOS_5_SCRATCH 0x0024
+# define RADEON_LCD1_ON (1 << 0)
+# define RADEON_CRT1_ON (1 << 1)
+# define RADEON_TV1_ON (1 << 2)
+# define RADEON_DFP1_ON (1 << 3)
+# define RADEON_CRT2_ON (1 << 5)
+# define RADEON_CV1_ON (1 << 6)
+# define RADEON_DFP2_ON (1 << 7)
+# define RADEON_LCD1_CRTC_MASK (1 << 8)
+# define RADEON_LCD1_CRTC_SHIFT 8
+# define RADEON_CRT1_CRTC_MASK (1 << 9)
+# define RADEON_CRT1_CRTC_SHIFT 9
+# define RADEON_TV1_CRTC_MASK (1 << 10)
+# define RADEON_TV1_CRTC_SHIFT 10
+# define RADEON_DFP1_CRTC_MASK (1 << 11)
+# define RADEON_DFP1_CRTC_SHIFT 11
+# define RADEON_CRT2_CRTC_MASK (1 << 12)
+# define RADEON_CRT2_CRTC_SHIFT 12
+# define RADEON_CV1_CRTC_MASK (1 << 13)
+# define RADEON_CV1_CRTC_SHIFT 13
+# define RADEON_DFP2_CRTC_MASK (1 << 14)
+# define RADEON_DFP2_CRTC_SHIFT 14
+# define RADEON_ACC_REQ_LCD1 (1 << 16)
+# define RADEON_ACC_REQ_CRT1 (1 << 17)
+# define RADEON_ACC_REQ_TV1 (1 << 18)
+# define RADEON_ACC_REQ_DFP1 (1 << 19)
+# define RADEON_ACC_REQ_CRT2 (1 << 21)
+# define RADEON_ACC_REQ_TV2 (1 << 22)
+# define RADEON_ACC_REQ_DFP2 (1 << 23)
+#define RADEON_BIOS_6_SCRATCH 0x0028
+# define RADEON_ACC_MODE_CHANGE (1 << 2)
+# define RADEON_EXT_DESKTOP_MODE (1 << 3)
+# define RADEON_LCD_DPMS_ON (1 << 20)
+# define RADEON_CRT_DPMS_ON (1 << 21)
+# define RADEON_TV_DPMS_ON (1 << 22)
+# define RADEON_DFP_DPMS_ON (1 << 23)
+# define RADEON_DPMS_MASK (3 << 24)
+# define RADEON_DPMS_ON (0 << 24)
+# define RADEON_DPMS_STANDBY (1 << 24)
+# define RADEON_DPMS_SUSPEND (2 << 24)
+# define RADEON_DPMS_OFF (3 << 24)
+# define RADEON_SCREEN_BLANKING (1 << 26)
+# define RADEON_DRIVER_CRITICAL (1 << 27)
+# define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH 0x002c
+# define RADEON_SYS_HOTKEY (1 << 10)
+# define RADEON_DRV_LOADED (1 << 12)
+#define RADEON_BIOS_ROM 0x0f30 /* PCI */
+#define RADEON_BIST 0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0 0x1480
+#define RADEON_BRUSH_DATA1 0x1484
+#define RADEON_BRUSH_DATA10 0x14a8
+#define RADEON_BRUSH_DATA11 0x14ac
+#define RADEON_BRUSH_DATA12 0x14b0
+#define RADEON_BRUSH_DATA13 0x14b4
+#define RADEON_BRUSH_DATA14 0x14b8
+#define RADEON_BRUSH_DATA15 0x14bc
+#define RADEON_BRUSH_DATA16 0x14c0
+#define RADEON_BRUSH_DATA17 0x14c4
+#define RADEON_BRUSH_DATA18 0x14c8
+#define RADEON_BRUSH_DATA19 0x14cc
+#define RADEON_BRUSH_DATA2 0x1488
+#define RADEON_BRUSH_DATA20 0x14d0
+#define RADEON_BRUSH_DATA21 0x14d4
+#define RADEON_BRUSH_DATA22 0x14d8
+#define RADEON_BRUSH_DATA23 0x14dc
+#define RADEON_BRUSH_DATA24 0x14e0
+#define RADEON_BRUSH_DATA25 0x14e4
+#define RADEON_BRUSH_DATA26 0x14e8
+#define RADEON_BRUSH_DATA27 0x14ec
+#define RADEON_BRUSH_DATA28 0x14f0
+#define RADEON_BRUSH_DATA29 0x14f4
+#define RADEON_BRUSH_DATA3 0x148c
+#define RADEON_BRUSH_DATA30 0x14f8
+#define RADEON_BRUSH_DATA31 0x14fc
+#define RADEON_BRUSH_DATA32 0x1500
+#define RADEON_BRUSH_DATA33 0x1504
+#define RADEON_BRUSH_DATA34 0x1508
+#define RADEON_BRUSH_DATA35 0x150c
+#define RADEON_BRUSH_DATA36 0x1510
+#define RADEON_BRUSH_DATA37 0x1514
+#define RADEON_BRUSH_DATA38 0x1518
+#define RADEON_BRUSH_DATA39 0x151c
+#define RADEON_BRUSH_DATA4 0x1490
+#define RADEON_BRUSH_DATA40 0x1520
+#define RADEON_BRUSH_DATA41 0x1524
+#define RADEON_BRUSH_DATA42 0x1528
+#define RADEON_BRUSH_DATA43 0x152c
+#define RADEON_BRUSH_DATA44 0x1530
+#define RADEON_BRUSH_DATA45 0x1534
+#define RADEON_BRUSH_DATA46 0x1538
+#define RADEON_BRUSH_DATA47 0x153c
+#define RADEON_BRUSH_DATA48 0x1540
+#define RADEON_BRUSH_DATA49 0x1544
+#define RADEON_BRUSH_DATA5 0x1494
+#define RADEON_BRUSH_DATA50 0x1548
+#define RADEON_BRUSH_DATA51 0x154c
+#define RADEON_BRUSH_DATA52 0x1550
+#define RADEON_BRUSH_DATA53 0x1554
+#define RADEON_BRUSH_DATA54 0x1558
+#define RADEON_BRUSH_DATA55 0x155c
+#define RADEON_BRUSH_DATA56 0x1560
+#define RADEON_BRUSH_DATA57 0x1564
+#define RADEON_BRUSH_DATA58 0x1568
+#define RADEON_BRUSH_DATA59 0x156c
+#define RADEON_BRUSH_DATA6 0x1498
+#define RADEON_BRUSH_DATA60 0x1570
+#define RADEON_BRUSH_DATA61 0x1574
+#define RADEON_BRUSH_DATA62 0x1578
+#define RADEON_BRUSH_DATA63 0x157c
+#define RADEON_BRUSH_DATA7 0x149c
+#define RADEON_BRUSH_DATA8 0x14a0
+#define RADEON_BRUSH_DATA9 0x14a4
+#define RADEON_BRUSH_SCALE 0x1470
+#define RADEON_BRUSH_Y_X 0x1474
+#define RADEON_BUS_CNTL 0x0030
+# define RADEON_BUS_MASTER_DIS (1 << 6)
+# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */
+# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
+# define RADEON_BUS_RD_ABORT_EN (1 << 25)
+# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+# define RADEON_BUS_WRT_BURST (1 << 29)
+# define RADEON_BUS_READ_BURST (1 << 30)
+#define RADEON_BUS_CNTL1 0x0034
+# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN 0x0160
+# define RV370_MSI_REARM_EN (1 << 0)
+
+/* #define RADEON_PCIE_INDEX 0x0030 */
+/* #define RADEON_PCIE_DATA 0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */
+# define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0
+# define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7
+# define RADEON_PCIE_LC_LINK_WIDTH_X0 0
+# define RADEON_PCIE_LC_LINK_WIDTH_X1 1
+# define RADEON_PCIE_LC_LINK_WIDTH_X2 2
+# define RADEON_PCIE_LC_LINK_WIDTH_X4 3
+# define RADEON_PCIE_LC_LINK_WIDTH_X8 4
+# define RADEON_PCIE_LC_LINK_WIDTH_X12 5
+# define RADEON_PCIE_LC_LINK_WIDTH_X16 6
+# define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4
+# define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70
+# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
+# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
+# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
+# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
+# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
+# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+
+#define RADEON_CACHE_CNTL 0x1724
+#define RADEON_CACHE_LINE 0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */
+# define RADEON_DONT_USE_XTALIN (1 << 4)
+# define RADEON_SCLK_DYN_START_CNTL (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA 0x000c
+#define RADEON_CLOCK_CNTL_INDEX 0x0008
+# define RADEON_PLL_WR_EN (1 << 7)
+# define RADEON_PLL_DIV_SEL (3 << 8)
+# define RADEON_PLL2_DIV_SEL_MASK (~(3 << 8))
+#define RADEON_CLK_PWRMGT_CNTL 0x0014
+# define RADEON_ENGIN_DYNCLK_MODE (1 << 12)
+# define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13)
+# define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+# define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+# define RADEON_MC_BUSY (1 << 16)
+# define RADEON_DLL_READY (1 << 19)
+# define RADEON_CG_NO1_DEBUG_0 (1 << 24)
+# define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24)
+# define RADEON_DYN_STOP_MODE_MASK (7 << 21)
+# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
+# define RADEON_TVCLK_TURNOFF (1 << 31)
+#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
+# define RADEON_PM_MODE_SEL (1 << 13)
+# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D 0x1a24
+#define RADEON_CLR_CMP_CLR_DST 0x15c8
+#define RADEON_CLR_CMP_CLR_SRC 0x15c4
+#define RADEON_CLR_CMP_CNTL 0x15c0
+# define RADEON_SRC_CMP_EQ_COLOR (4 << 0)
+# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0)
+# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24)
+#define RADEON_CLR_CMP_MASK 0x15cc
+# define RADEON_CLR_CMP_MSK 0xffffffff
+#define RADEON_CLR_CMP_MASK_3D 0x1A28
+#define RADEON_COMMAND 0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c
+#define RADEON_CONFIG_APER_0_BASE 0x0100
+#define RADEON_CONFIG_APER_1_BASE 0x0104
+#define RADEON_CONFIG_APER_SIZE 0x0108
+#define RADEON_CONFIG_BONDS 0x00e8
+#define RADEON_CONFIG_CNTL 0x00e0
+# define RADEON_CFG_VGA_RAM_EN (1 << 8)
+# define RADEON_CFG_VGA_IO_DIS (1 << 9)
+# define RADEON_CFG_ATI_REV_A11 (0 << 16)
+# define RADEON_CFG_ATI_REV_A12 (1 << 16)
+# define RADEON_CFG_ATI_REV_A13 (2 << 16)
+# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE 0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114
+#define RADEON_CONFIG_REG_1_BASE 0x010c
+#define RADEON_CONFIG_REG_APER_SIZE 0x0110
+#define RADEON_CONFIG_XSTRAP 0x00e4
+#define RADEON_CONSTANT_COLOR_C 0x1d34
+# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff
+# define RADEON_CONSTANT_COLOR_ZERO 0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR 0x0740
+#define RADEON_CRC_CMDFIFO_DOUT 0x0744
+#define RADEON_GRPH_BUFFER_CNTL 0x02f0
+# define RADEON_GRPH_START_REQ_MASK (0x7f)
+# define RADEON_GRPH_START_REQ_SHIFT 0
+# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH_STOP_REQ_SHIFT 8
+# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH_STOP_CNTL (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL 0x03f0
+# define RADEON_GRPH2_START_REQ_MASK (0x7f)
+# define RADEON_GRPH2_START_REQ_SHIFT 0
+# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8)
+# define RADEON_GRPH2_STOP_REQ_SHIFT 8
+# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16)
+# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16
+# define RADEON_GRPH2_CRITICAL_CNTL (1<<28)
+# define RADEON_GRPH2_BUFFER_SIZE (1<<29)
+# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30)
+# define RADEON_GRPH2_STOP_CNTL (1<<31)
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC_EXT_CNTL 0x0054
+# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0)
+# define RADEON_VGA_ATI_LINEAR (1 << 3)
+# define RADEON_XCRT_CNT_EN (1 << 6)
+# define RADEON_CRTC_HSYNC_DIS (1 << 8)
+# define RADEON_CRTC_VSYNC_DIS (1 << 9)
+# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
+# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
+# define RADEON_CRTC_CRT_ON (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
+# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
+# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1)
+# define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2)
+#define RADEON_CRTC_GEN_CNTL 0x0050
+# define RADEON_CRTC_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC_CSYNC_EN (1 << 4)
+# define RADEON_CRTC_ICON_EN (1 << 15)
+# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
+# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
+# define RADEON_CRTC_CUR_MODE_SHIFT 20
+# define RADEON_CRTC_CUR_MODE_MONO 0
+# define RADEON_CRTC_CUR_MODE_24BPP 2
+# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
+# define RADEON_CRTC_EN (1 << 25)
+# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL 0x03f8
+# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0)
+# define RADEON_CRTC2_INTERLACE_EN (1 << 1)
+# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4)
+# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5)
+# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6)
+# define RADEON_CRTC2_CRT2_ON (1 << 7)
+# define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+# define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8)
+# define RADEON_CRTC2_ICON_EN (1 << 15)
+# define RADEON_CRTC2_CUR_EN (1 << 16)
+# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20)
+# define RADEON_CRTC2_DISP_DIS (1 << 23)
+# define RADEON_CRTC2_EN (1 << 25)
+# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26)
+# define RADEON_CRTC2_CSYNC_EN (1 << 27)
+# define RADEON_CRTC2_HSYNC_DIS (1 << 28)
+# define RADEON_CRTC2_VSYNC_DIS (1 << 29)
+#define RADEON_CRTC_MORE_CNTL 0x27c
+# define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+# define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204
+# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304
+# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3)
+# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16)
+# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_H_SYNC_POL (1 << 23)
+#define RADEON_CRTC_H_TOTAL_DISP 0x0200
+# define RADEON_CRTC_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC_H_TOTAL_SHIFT 0
+# define RADEON_CRTC_H_DISP (0x01ff << 16)
+# define RADEON_CRTC_H_DISP_SHIFT 16
+#define RADEON_CRTC2_H_TOTAL_DISP 0x0300
+# define RADEON_CRTC2_H_TOTAL (0x03ff << 0)
+# define RADEON_CRTC2_H_TOTAL_SHIFT 0
+# define RADEON_CRTC2_H_DISP (0x01ff << 16)
+# define RADEON_CRTC2_H_DISP_SHIFT 16
+
+#define RADEON_CRTC_OFFSET_RIGHT 0x0220
+#define RADEON_CRTC_OFFSET 0x0224
+# define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+# define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31)
+
+#define RADEON_CRTC2_OFFSET 0x0324
+# define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+# define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31)
+#define RADEON_CRTC_OFFSET_CNTL 0x0228
+# define RADEON_CRTC_TILE_LINE_SHIFT 0
+# define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4
+# define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7)
+# define R300_CRTC_X_Y_MODE_EN (1 << 9)
+# define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10)
+# define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10)
+# define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12)
+# define R300_CRTC_MICRO_TILE_EN (1 << 13)
+# define R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14)
+# define R300_CRTC_MACRO_TILE_EN (1 << 15)
+# define RADEON_CRTC_TILE_EN_RIGHT (1 << 14)
+# define RADEON_CRTC_TILE_EN (1 << 15)
+# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
+# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
+
+#define R300_CRTC_TILE_X0_Y0 0x0350
+#define R300_CRTC2_TILE_X0_Y0 0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL 0x0328
+# define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+# define RADEON_CRTC2_TILE_EN (1 << 15)
+#define RADEON_CRTC_PITCH 0x022c
+# define RADEON_CRTC_PITCH__SHIFT 0
+# define RADEON_CRTC_PITCH__RIGHT_SHIFT 16
+
+#define RADEON_CRTC2_PITCH 0x032c
+#define RADEON_CRTC_STATUS 0x005c
+# define RADEON_CRTC_VBLANK_CUR (1 << 0)
+# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC2_STATUS 0x03fc
+# define RADEON_CRTC2_VBLANK_CUR (1 << 0)
+# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
+# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
+#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
+# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c
+# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0)
+# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16)
+# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16
+# define RADEON_CRTC2_V_SYNC_POL (1 << 23)
+#define RADEON_CRTC_V_TOTAL_DISP 0x0208
+# define RADEON_CRTC_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC_V_TOTAL_SHIFT 0
+# define RADEON_CRTC_V_DISP (0x07ff << 16)
+# define RADEON_CRTC_V_DISP_SHIFT 16
+#define RADEON_CRTC2_V_TOTAL_DISP 0x0308
+# define RADEON_CRTC2_V_TOTAL (0x07ff << 0)
+# define RADEON_CRTC2_V_TOTAL_SHIFT 0
+# define RADEON_CRTC2_V_DISP (0x07ff << 16)
+# define RADEON_CRTC2_V_DISP_SHIFT 16
+#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210
+# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
+#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
+#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0 0x026c
+#define RADEON_CUR_CLR1 0x0270
+#define RADEON_CUR_HORZ_VERT_OFF 0x0268
+#define RADEON_CUR_HORZ_VERT_POSN 0x0264
+#define RADEON_CUR_OFFSET 0x0260
+# define RADEON_CUR_LOCK (1 << 31)
+#define RADEON_CUR2_CLR0 0x036c
+#define RADEON_CUR2_CLR1 0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF 0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN 0x0364
+#define RADEON_CUR2_OFFSET 0x0360
+# define RADEON_CUR2_LOCK (1 << 31)
+
+#define RADEON_DAC_CNTL 0x0058
+# define RADEON_DAC_RANGE_CNTL (3 << 0)
+# define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0)
+# define RADEON_DAC_RANGE_CNTL_MASK 0x03
+# define RADEON_DAC_BLANKING (1 << 2)
+# define RADEON_DAC_CMP_EN (1 << 3)
+# define RADEON_DAC_CMP_OUTPUT (1 << 7)
+# define RADEON_DAC_8BIT_EN (1 << 8)
+# define RADEON_DAC_TVO_EN (1 << 10)
+# define RADEON_DAC_VGA_ADR_EN (1 << 13)
+# define RADEON_DAC_PDWN (1 << 15)
+# define RADEON_DAC_MASK_ALL (0xff << 24)
+#define RADEON_DAC_CNTL2 0x007c
+# define RADEON_DAC2_TV_CLK_SEL (0 << 1)
+# define RADEON_DAC2_DAC_CLK_SEL (1 << 0)
+# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1)
+# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5)
+# define RADEON_DAC2_CMP_EN (1 << 7)
+# define RADEON_DAC2_CMP_OUT_R (1 << 8)
+# define RADEON_DAC2_CMP_OUT_G (1 << 9)
+# define RADEON_DAC2_CMP_OUT_B (1 << 10)
+# define RADEON_DAC2_CMP_OUTPUT (1 << 11)
+#define RADEON_DAC_EXT_CNTL 0x0280
+# define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+# define RADEON_DAC2_FORCE_DATA_EN (1 << 1)
+# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4)
+# define RADEON_DAC_FORCE_DATA_EN (1 << 5)
+# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_G (1 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6)
+# define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6)
+# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00
+# define RADEON_DAC_FORCE_DATA_SHIFT 8
+#define RADEON_DAC_MACRO_CNTL 0x0d04
+# define RADEON_DAC_PDWN_R (1 << 16)
+# define RADEON_DAC_PDWN_G (1 << 17)
+# define RADEON_DAC_PDWN_B (1 << 18)
+#define RADEON_DISP_PWR_MAN 0x0d08
+# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0)
+# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4)
+# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8)
+# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+# define RADEON_DISP_D3_RST (1 << 16)
+# define RADEON_DISP_D3_REG_RST (1 << 17)
+# define RADEON_DISP_D3_GRPH_RST (1 << 18)
+# define RADEON_DISP_D3_SUBPIC_RST (1 << 19)
+# define RADEON_DISP_D3_OV0_RST (1 << 20)
+# define RADEON_DISP_D1D2_GRPH_RST (1 << 21)
+# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22)
+# define RADEON_DISP_D1D2_OV0_RST (1 << 23)
+# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24)
+# define RADEON_TV_ENABLE_RST (1 << 25)
+# define RADEON_AUTO_PWRUP_EN (1 << 26)
+#define RADEON_TV_DAC_CNTL 0x088c
+# define RADEON_TV_DAC_NBLANK (1 << 0)
+# define RADEON_TV_DAC_NHOLD (1 << 1)
+# define RADEON_TV_DAC_PEDESTAL (1 << 2)
+# define RADEON_TV_MONITOR_DETECT_EN (1 << 4)
+# define RADEON_TV_DAC_CMPOUT (1 << 5)
+# define RADEON_TV_DAC_STD_MASK (3 << 8)
+# define RADEON_TV_DAC_STD_PAL (0 << 8)
+# define RADEON_TV_DAC_STD_NTSC (1 << 8)
+# define RADEON_TV_DAC_STD_PS2 (2 << 8)
+# define RADEON_TV_DAC_STD_RS343 (3 << 8)
+# define RADEON_TV_DAC_BGSLEEP (1 << 6)
+# define RADEON_TV_DAC_BGADJ_MASK (0xf << 16)
+# define RADEON_TV_DAC_BGADJ_SHIFT 16
+# define RADEON_TV_DAC_DACADJ_MASK (0xf << 20)
+# define RADEON_TV_DAC_DACADJ_SHIFT 20
+# define RADEON_TV_DAC_RDACPD (1 << 24)
+# define RADEON_TV_DAC_GDACPD (1 << 25)
+# define RADEON_TV_DAC_BDACPD (1 << 26)
+# define RADEON_TV_DAC_RDACDET (1 << 29)
+# define RADEON_TV_DAC_GDACDET (1 << 30)
+# define RADEON_TV_DAC_BDACDET (1 << 31)
+# define R420_TV_DAC_DACADJ_MASK (0x1f << 20)
+# define R420_TV_DAC_RDACPD (1 << 25)
+# define R420_TV_DAC_GDACPD (1 << 26)
+# define R420_TV_DAC_BDACPD (1 << 27)
+# define R420_TV_DAC_TVENABLE (1 << 28)
+#define RADEON_DISP_HW_DEBUG 0x0d14
+# define RADEON_CRT2_DISP1_SEL (1 << 5)
+#define RADEON_DISP_OUTPUT_CNTL 0x0d64
+# define RADEON_DISP_DAC_SOURCE_MASK 0x03
+# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c
+# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+# define RADEON_DISP_DAC_SOURCE_RMX 0x02
+# define RADEON_DISP_DAC_SOURCE_LTU 0x03
+# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+# define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0
+# define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2)
+# define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2)
+# define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4)
+# define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+# define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4)
+# define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4)
+# define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */
+# define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL 0x0d6c
+# define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+# define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG 0x02cc
+#define RADEON_DAC_DATA 0x03c9 /* VGA */
+#define RADEON_DAC_MASK 0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG 0x02e0
+#define RADEON_DDA_ON_OFF 0x02e4
+#define RADEON_DEFAULT_OFFSET 0x16e0
+#define RADEON_DEFAULT_PITCH 0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8
+# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0)
+# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824
+#define RADEON_DEVICE_ID 0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL 0x0d00
+# define RADEON_SOFT_RESET_GRPH_PP (1 << 0)
+#define RADEON_DISP_MERGE_CNTL 0x0d60
+# define RADEON_DISP_ALPHA_MODE_MASK 0x03
+# define RADEON_DISP_ALPHA_MODE_KEY 0
+# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+# define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+# define RADEON_DISP_RGB_OFFSET_EN (1 << 8)
+# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16)
+# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24)
+# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL 0x0d68
+# define RADEON_DISP2_RGB_OFFSET_EN (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR 0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR 0x147c
+#define RADEON_DP_CNTL 0x16c0
+# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0)
+# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1)
+# define RADEON_DP_DST_TILE_LINEAR (0 << 3)
+# define RADEON_DP_DST_TILE_MACRO (1 << 3)
+# define RADEON_DP_DST_TILE_MICRO (2 << 3)
+# define RADEON_DP_DST_TILE_BOTH (3 << 3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
+# define RADEON_DST_Y_MAJOR (1 << 2)
+# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE 0x16c4
+# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL 0x146c
+# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
+# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
+# define RADEON_GMC_SRC_CLIPPING (1 << 2)
+# define RADEON_GMC_DST_CLIPPING (1 << 3)
+# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4)
+# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4)
+# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4)
+# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4)
+# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4)
+# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4)
+# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4)
+# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
+# define RADEON_GMC_BRUSH_NONE (15 << 4)
+# define RADEON_GMC_DST_8BPP_CI (2 << 8)
+# define RADEON_GMC_DST_15BPP (3 << 8)
+# define RADEON_GMC_DST_16BPP (4 << 8)
+# define RADEON_GMC_DST_24BPP (5 << 8)
+# define RADEON_GMC_DST_32BPP (6 << 8)
+# define RADEON_GMC_DST_8BPP_RGB (7 << 8)
+# define RADEON_GMC_DST_Y8 (8 << 8)
+# define RADEON_GMC_DST_RGB8 (9 << 8)
+# define RADEON_GMC_DST_VYUY (11 << 8)
+# define RADEON_GMC_DST_YVYU (12 << 8)
+# define RADEON_GMC_DST_AYUV444 (14 << 8)
+# define RADEON_GMC_DST_ARGB4444 (15 << 8)
+# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8)
+# define RADEON_GMC_DST_DATATYPE_SHIFT 8
+# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
+# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
+# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
+# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14)
+# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14)
+# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14)
+# define RADEON_GMC_CONVERSION_TEMP (1 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15)
+# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15)
+# define RADEON_GMC_ROP3_MASK (0xff << 16)
+# define RADEON_DP_SRC_SOURCE_MASK (7 << 24)
+# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
+# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
+# define RADEON_GMC_3D_FCN_EN (1 << 27)
+# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
+# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
+# define RADEON_GMC_WR_MSK_DIS (1 << 30)
+# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31)
+# define RADEON_ROP3_ZERO 0x00000000
+# define RADEON_ROP3_DSa 0x00880000
+# define RADEON_ROP3_SDna 0x00440000
+# define RADEON_ROP3_S 0x00cc0000
+# define RADEON_ROP3_DSna 0x00220000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DSx 0x00660000
+# define RADEON_ROP3_DSo 0x00ee0000
+# define RADEON_ROP3_DSon 0x00110000
+# define RADEON_ROP3_DSxn 0x00990000
+# define RADEON_ROP3_Dn 0x00550000
+# define RADEON_ROP3_SDno 0x00dd0000
+# define RADEON_ROP3_Sn 0x00330000
+# define RADEON_ROP3_DSno 0x00bb0000
+# define RADEON_ROP3_DSan 0x00770000
+# define RADEON_ROP3_ONE 0x00ff0000
+# define RADEON_ROP3_DPa 0x00a00000
+# define RADEON_ROP3_PDna 0x00500000
+# define RADEON_ROP3_P 0x00f00000
+# define RADEON_ROP3_DPna 0x000a0000
+# define RADEON_ROP3_D 0x00aa0000
+# define RADEON_ROP3_DPx 0x005a0000
+# define RADEON_ROP3_DPo 0x00fa0000
+# define RADEON_ROP3_DPon 0x00050000
+# define RADEON_ROP3_PDxn 0x00a50000
+# define RADEON_ROP3_PDno 0x00f50000
+# define RADEON_ROP3_Pn 0x000f0000
+# define RADEON_ROP3_DPno 0x00af0000
+# define RADEON_ROP3_DPan 0x005f0000
+#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84
+#define RADEON_DP_MIX 0x16c8
+#define RADEON_DP_SRC_BKGD_CLR 0x15dc
+#define RADEON_DP_SRC_FRGD_CLR 0x15d8
+#define RADEON_DP_WRITE_MASK 0x16cc
+#define RADEON_DST_BRES_DEC 0x1630
+#define RADEON_DST_BRES_ERR 0x1628
+#define RADEON_DST_BRES_INC 0x162c
+#define RADEON_DST_BRES_LNTH 0x1634
+#define RADEON_DST_BRES_LNTH_SUB 0x1638
+#define RADEON_DST_HEIGHT 0x1410
+#define RADEON_DST_HEIGHT_WIDTH 0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8 0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4
+#define RADEON_DST_HEIGHT_Y 0x15a0
+#define RADEON_DST_LINE_START 0x1600
+#define RADEON_DST_LINE_END 0x1604
+#define RADEON_DST_LINE_PATCOUNT 0x1608
+# define RADEON_BRES_CNTL_SHIFT 8
+#define RADEON_DST_OFFSET 0x1404
+#define RADEON_DST_PITCH 0x1408
+#define RADEON_DST_PITCH_OFFSET 0x142c
+#define RADEON_DST_PITCH_OFFSET_C 0x1c80
+# define RADEON_PITCH_SHIFT 21
+# define RADEON_DST_TILE_LINEAR (0 << 30)
+# define RADEON_DST_TILE_MACRO (1 << 30)
+# define RADEON_DST_TILE_MICRO (2 << 30)
+# define RADEON_DST_TILE_BOTH (3 << 30)
+#define RADEON_DST_WIDTH 0x140c
+#define RADEON_DST_WIDTH_HEIGHT 0x1598
+#define RADEON_DST_WIDTH_X 0x1588
+#define RADEON_DST_WIDTH_X_INCY 0x159c
+#define RADEON_DST_X 0x141c
+#define RADEON_DST_X_SUB 0x15a4
+#define RADEON_DST_X_Y 0x1594
+#define RADEON_DST_Y 0x1420
+#define RADEON_DST_Y_SUB 0x15a8
+#define RADEON_DST_Y_X 0x1438
+
+#define RADEON_FCP_CNTL 0x0910
+# define RADEON_FCP0_SRC_PCICLK 0
+# define RADEON_FCP0_SRC_PCLK 1
+# define RADEON_FCP0_SRC_PCLKb 2
+# define RADEON_FCP0_SRC_HREF 3
+# define RADEON_FCP0_SRC_GND 4
+# define RADEON_FCP0_SRC_HREFb 5
+#define RADEON_FLUSH_1 0x1704
+#define RADEON_FLUSH_2 0x1708
+#define RADEON_FLUSH_3 0x170c
+#define RADEON_FLUSH_4 0x1710
+#define RADEON_FLUSH_5 0x1714
+#define RADEON_FLUSH_6 0x1718
+#define RADEON_FLUSH_7 0x171c
+#define RADEON_FOG_3D_TABLE_START 0x1810
+#define RADEON_FOG_3D_TABLE_END 0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY 0x181c
+#define RADEON_FOG_TABLE_INDEX 0x1a14
+#define RADEON_FOG_TABLE_DATA 0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254
+# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff
+# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000
+# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff
+# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000
+# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8
+# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000
+# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff
+# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000
+# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010
+# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000
+# define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010
+# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010
+# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000
+# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010
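+/* Illustrative sketch, not part of the original header: decoding the
+ * shadowed flat-panel CRTC horizontal timing with the masks and shifts
+ * above.  Helper names are hypothetical; u32 is assumed from the
+ * kernel types.
+ */
+static inline u32 radeon_fp_crtc_h_total(u32 h_total_disp)
+{
+	return (h_total_disp & RADEON_FP_CRTC_H_TOTAL_MASK) >>
+	       RADEON_FP_CRTC_H_TOTAL_SHIFT;
+}
+
+static inline u32 radeon_fp_crtc_h_disp(u32 h_total_disp)
+{
+	return (h_total_disp & RADEON_FP_CRTC_H_DISP_MASK) >>
+	       RADEON_FP_CRTC_H_DISP_SHIFT;
+}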
+#define RADEON_FP_GEN_CNTL 0x0284
+# define RADEON_FP_FPON (1 << 0)
+# define RADEON_FP_BLANK_EN (1 << 1)
+# define RADEON_FP_TMDS_EN (1 << 2)
+# define RADEON_FP_PANEL_FORMAT (1 << 3)
+# define RADEON_FP_EN_TMDS (1 << 7)
+# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define RADEON_FP_DETECT_INT_POL (1 << 9)
+# define R200_FP_SOURCE_SEL_MASK (3 << 10)
+# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
+# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
+# define R200_FP_SOURCE_SEL_RMX (2 << 10)
+# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
+# define RADEON_FP_SEL_CRTC1 (0 << 13)
+# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define R300_HPD_SEL(x) ((x) << 13)
+# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18)
+# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+# define RADEON_FP_DFP_SYNC_SEL (1 << 21)
+# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22)
+# define RADEON_FP_CRT_SYNC_SEL (1 << 23)
+# define RADEON_FP_USE_SHADOW_EN (1 << 24)
+# define RADEON_FP_CRT_SYNC_ALT (1 << 26)
+#define RADEON_FP2_GEN_CNTL 0x0288
+# define RADEON_FP2_BLANK_EN (1 << 1)
+# define RADEON_FP2_ON (1 << 2)
+# define RADEON_FP2_PANEL_FORMAT (1 << 3)
+# define RADEON_FP2_DETECT_SENSE (1 << 8)
+# define RADEON_FP2_DETECT_INT_POL (1 << 9)
+# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
+# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
+# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
+# define R200_FP2_SOURCE_SEL_RMX (2 << 10)
+# define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10)
+# define RADEON_FP2_SRC_SEL_MASK (3 << 13)
+# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13)
+# define RADEON_FP2_FP_POL (1 << 16)
+# define RADEON_FP2_LP_POL (1 << 17)
+# define RADEON_FP2_SCK_POL (1 << 18)
+# define RADEON_FP2_LCD_CNTL_MASK (7 << 19)
+# define RADEON_FP2_PAD_FLOP_EN (1 << 22)
+# define RADEON_FP2_CRC_EN (1 << 23)
+# define RADEON_FP2_CRC_READ_EN (1 << 24)
+# define RADEON_FP2_DVO_EN (1 << 25)
+# define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26)
+# define R200_FP2_DVO_RATE_SEL_SDR (1 << 27)
+# define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28)
+# define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID 0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4
+#define RADEON_FP_HORZ_STRETCH 0x028c
+#define RADEON_FP_HORZ2_STRETCH 0x038c
+# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+# define RADEON_HORZ_STRETCH_RATIO_MAX 4096
+# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16)
+# define RADEON_HORZ_PANEL_SHIFT 16
+# define RADEON_HORZ_STRETCH_PIXREP (0 << 25)
+# define RADEON_HORZ_STRETCH_BLEND (1 << 26)
+# define RADEON_HORZ_STRETCH_ENABLE (1 << 25)
+# define RADEON_HORZ_AUTO_RATIO (1 << 27)
+# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28)
+# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE 0x0278
+#define RADEON_FP_V_SYNC_STRT_WID 0x02c8
+#define RADEON_FP_VERT_STRETCH 0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8
+#define RADEON_FP_VERT2_STRETCH 0x0390
+# define RADEON_VERT_PANEL_SIZE (0xfff << 12)
+# define RADEON_VERT_PANEL_SHIFT 12
+# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff
+# define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+# define RADEON_VERT_STRETCH_RATIO_MAX 4096
+# define RADEON_VERT_STRETCH_ENABLE (1 << 25)
+# define RADEON_VERT_STRETCH_LINEREP (0 << 26)
+# define RADEON_VERT_STRETCH_BLEND (1 << 26)
+# define RADEON_VERT_AUTO_RATIO_EN (1 << 27)
+# define RADEON_VERT_AUTO_RATIO_INC (1 << 31)
+# define RADEON_VERT_STRETCH_RESERVED 0x71000000
+#define RS400_FP_2ND_GEN_CNTL 0x0384
+# define RS400_FP_2ND_ON (1 << 0)
+# define RS400_FP_2ND_BLANK_EN (1 << 1)
+# define RS400_TMDS_2ND_EN (1 << 2)
+# define RS400_PANEL_FORMAT_2ND (1 << 3)
+# define RS400_FP_2ND_EN_TMDS (1 << 7)
+# define RS400_FP_2ND_DETECT_SENSE (1 << 8)
+# define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10)
+# define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10)
+# define RS400_FP_2ND_DETECT_EN (1 << 12)
+# define RS400_HPD_2ND_SEL (1 << 13)
+#define RS400_FP2_2_GEN_CNTL 0x0388
+# define RS400_FP2_2_BLANK_EN (1 << 1)
+# define RS400_FP2_2_ON (1 << 2)
+# define RS400_FP2_2_PANEL_FORMAT (1 << 3)
+# define RS400_FP2_2_DETECT_SENSE (1 << 8)
+# define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10)
+# define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10)
+# define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10)
+# define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10)
+# define RS400_FP2_2_DVO2_EN (1 << 25)
+#define RS400_TMDS2_CNTL 0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4
+# define RS400_TMDS2_PLLEN (1 << 0)
+# define RS400_TMDS2_PLLRST (1 << 1)
+
+#define RADEON_GEN_INT_CNTL 0x0040
+# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_FP_DETECT_MASK (1 << 4)
+# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+# define RADEON_FP2_DETECT_MASK (1 << 10)
+# define RADEON_GUI_IDLE_MASK (1 << 19)
+# define RADEON_SW_INT_ENABLE (1 << 25)
+#define RADEON_GEN_INT_STATUS 0x0044
+# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_FP_DETECT_STAT (1 << 4)
+# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
+# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_FP2_DETECT_STAT (1 << 10)
+# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
+# define RADEON_GUI_IDLE_STAT (1 << 19)
+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
+# define RADEON_SW_INT_FIRE (1 << 26)
+# define RADEON_SW_INT_TEST (1 << 25)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
+#define RADEON_GENENB 0x03c3 /* VGA */
+#define RADEON_GENFC_RD 0x03ca /* VGA */
+#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD 0x03cc /* VGA */
+#define RADEON_GENMO_WT 0x03c2 /* VGA */
+#define RADEON_GENS0 0x03c2 /* VGA */
+#define RADEON_GENS1 0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB 0x006c
+#define RADEON_GPIO_CRT2_DDC 0x006c
+#define RADEON_GPIO_DVI_DDC 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC 0x0060 /* DDC1 */
+# define RADEON_GPIO_A_0 (1 << 0)
+# define RADEON_GPIO_A_1 (1 << 1)
+# define RADEON_GPIO_Y_0 (1 << 8)
+# define RADEON_GPIO_Y_1 (1 << 9)
+# define RADEON_GPIO_Y_SHIFT_0 8
+# define RADEON_GPIO_Y_SHIFT_1 9
+# define RADEON_GPIO_EN_0 (1 << 16)
+# define RADEON_GPIO_EN_1 (1 << 17)
+# define RADEON_GPIO_MASK_0 (1 << 24) /*??*/
+# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/
+#define RADEON_GRPH8_DATA 0x03cf /* VGA */
+#define RADEON_GRPH8_IDX 0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0 0x15e0
+#define RADEON_GUI_SCRATCH_REG1 0x15e4
+#define RADEON_GUI_SCRATCH_REG2 0x15e8
+#define RADEON_GUI_SCRATCH_REG3 0x15ec
+#define RADEON_GUI_SCRATCH_REG4 0x15f0
+#define RADEON_GUI_SCRATCH_REG5 0x15f4
+
+#define RADEON_HEADER 0x0f0e /* PCI */
+#define RADEON_HOST_DATA0 0x17c0
+#define RADEON_HOST_DATA1 0x17c4
+#define RADEON_HOST_DATA2 0x17c8
+#define RADEON_HOST_DATA3 0x17cc
+#define RADEON_HOST_DATA4 0x17d0
+#define RADEON_HOST_DATA5 0x17d4
+#define RADEON_HOST_DATA6 0x17d8
+#define RADEON_HOST_DATA7 0x17dc
+#define RADEON_HOST_DATA_LAST 0x17e0
+#define RADEON_HOST_PATH_CNTL 0x0130
+# define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24)
+# define RADEON_HDP_READ_BUFFER_INVALIDATE (1 << 27)
+# define RADEON_HDP_SOFT_RESET (1 << 26)
+# define RADEON_HDP_APER_CNTL (1 << 23)
+#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */
+# define RADEON_HTOT_CNTL_VGA_EN (1 << 28)
+#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */
+
+ /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0 0x0090
+# define RADEON_I2C_DONE (1 << 0)
+# define RADEON_I2C_NACK (1 << 1)
+# define RADEON_I2C_HALT (1 << 2)
+# define RADEON_I2C_SOFT_RST (1 << 5)
+# define RADEON_I2C_DRIVE_EN (1 << 6)
+# define RADEON_I2C_DRIVE_SEL (1 << 7)
+# define RADEON_I2C_START (1 << 8)
+# define RADEON_I2C_STOP (1 << 9)
+# define RADEON_I2C_RECEIVE (1 << 10)
+# define RADEON_I2C_ABORT (1 << 11)
+# define RADEON_I2C_GO (1 << 12)
+# define RADEON_I2C_PRESCALE_SHIFT 16
+#define RADEON_I2C_CNTL_1 0x0094
+# define RADEON_I2C_DATA_COUNT_SHIFT 0
+# define RADEON_I2C_ADDR_COUNT_SHIFT 4
+# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+# define RADEON_I2C_SEL (1 << 16)
+# define RADEON_I2C_EN (1 << 17)
+# define RADEON_I2C_TIME_LIMIT_SHIFT 24
+#define RADEON_I2C_DATA 0x0098
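+/* Illustrative sketch, not part of the original header: an I2C_CNTL_0
+ * word that starts a transaction on the multimedia bus.  The prescale
+ * value is caller-chosen, the helper name is hypothetical, and u32 is
+ * assumed from the kernel types.
+ */
+static inline u32 radeon_mm_i2c_cntl0_go(u32 prescale)
+{
+	return (prescale << RADEON_I2C_PRESCALE_SHIFT) |
+	       RADEON_I2C_DRIVE_EN |	/* drive the bus */
+	       RADEON_I2C_START | RADEON_I2C_STOP |
+	       RADEON_I2C_GO;		/* kick off the transfer */
+}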
+
+#define RADEON_DVI_I2C_CNTL_0 0x02e0
+# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
+# define R200_SEL_DDC1 0 /* depends on asic */
+# define R200_SEL_DDC2 1 /* depends on asic */
+# define R200_SEL_DDC3 2 /* depends on asic */
+# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
+# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
+# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
+# define RADEON_HW_USING_DVI_I2C (1 << 15)
+#define RADEON_DVI_I2C_CNTL_1 0x02e4
+#define RADEON_DVI_I2C_DATA 0x02e8
+
+#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */
+#define RADEON_IO_BASE 0x0f14 /* PCI */
+
+#define RADEON_LATENCY 0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC 0x1608
+#define RADEON_LEAD_BRES_LNTH 0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB 0x1624
+#define RADEON_LVDS_GEN_CNTL 0x02d0
+# define RADEON_LVDS_ON (1 << 0)
+# define RADEON_LVDS_DISPLAY_DIS (1 << 1)
+# define RADEON_LVDS_PANEL_TYPE (1 << 2)
+# define RADEON_LVDS_PANEL_FORMAT (1 << 3)
+# define RADEON_LVDS_NO_FM (0 << 4)
+# define RADEON_LVDS_2_GREY (1 << 4)
+# define RADEON_LVDS_4_GREY (2 << 4)
+# define RADEON_LVDS_RST_FM (1 << 6)
+# define RADEON_LVDS_EN (1 << 7)
+# define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+# define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+# define RADEON_LVDS_BL_MOD_EN (1 << 16)
+# define RADEON_LVDS_BL_CLK_SEL (1 << 17)
+# define RADEON_LVDS_DIGON (1 << 18)
+# define RADEON_LVDS_BLON (1 << 19)
+# define RADEON_LVDS_FP_POL_LOW (1 << 20)
+# define RADEON_LVDS_LP_POL_LOW (1 << 21)
+# define RADEON_LVDS_DTM_POL_LOW (1 << 22)
+# define RADEON_LVDS_SEL_CRTC2 (1 << 23)
+# define RADEON_LVDS_FPDI_EN (1 << 27)
+# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28
+#define RADEON_LVDS_PLL_CNTL 0x02d4
+# define RADEON_HSYNC_DELAY_SHIFT 28
+# define RADEON_HSYNC_DELAY_MASK (0xf << 28)
+# define RADEON_LVDS_PLL_EN (1 << 16)
+# define RADEON_LVDS_PLL_RESET (1 << 17)
+# define R300_LVDS_SRC_SEL_MASK (3 << 18)
+# define R300_LVDS_SRC_SEL_CRTC1 (0 << 18)
+# define R300_LVDS_SRC_SEL_CRTC2 (1 << 18)
+# define R300_LVDS_SRC_SEL_RMX (2 << 18)
+#define RADEON_LVDS_SS_GEN_CNTL 0x02ec
+# define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16
+# define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20
+
+#define RADEON_MAX_LATENCY 0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR 0x23c
+#define RADEON_DISPLAY2_BASE_ADDR 0x33c
+#define RADEON_OV0_BASE_ADDR 0x43c
+#define RADEON_NB_TOM 0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER 0x180
+# define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+# define R300_MC_DISP0R_INIT_LAT_MASK 0xf
+# define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+# define R300_MC_DISP1R_INIT_LAT_MASK 0xf
+#define RADEON_MCLK_CNTL 0x0012 /* PLL */
+# define RADEON_MCLKA_SRC_SEL_MASK 0x7
+# define RADEON_FORCEON_MCLKA (1 << 16)
+# define RADEON_FORCEON_MCLKB (1 << 17)
+# define RADEON_FORCEON_YCLKA (1 << 18)
+# define RADEON_FORCEON_YCLKB (1 << 19)
+# define RADEON_FORCEON_MC (1 << 20)
+# define RADEON_FORCEON_AIC (1 << 21)
+# define R300_DISABLE_MC_MCLKA (1 << 21)
+# define R300_DISABLE_MC_MCLKB (1 << 21)
+#define RADEON_MCLK_MISC 0x001f /* PLL */
+# define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
+# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
+
+#define RADEON_GPIOPAD_MASK 0x0198
+#define RADEON_GPIOPAD_A 0x019c
+#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_GPIOPAD_Y 0x01a4
+#define RADEON_MDGPIO_MASK 0x01a8
+#define RADEON_MDGPIO_A 0x01ac
+#define RADEON_MDGPIO_EN 0x01b0
+#define RADEON_MDGPIO_Y 0x01b4
+
+#define RADEON_MEM_ADDR_CONFIG 0x0148
+#define RADEON_MEM_BASE 0x0f10 /* PCI */
+#define RADEON_MEM_CNTL 0x0140
+# define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+# define RADEON_MEM_USE_B_CH_ONLY (1 << 1)
+# define RV100_HALF_MODE (1 << 3)
+# define R300_MEM_NUM_CHANNELS_MASK 0x03
+# define R300_MEM_USE_CD_CH_ONLY (1 << 2)
+#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER 0x0154
+#define RADEON_MEM_INTF_CNTL 0x014c
+#define RADEON_MEM_SDRAM_MODE_REG 0x0158
+# define RADEON_SDRAM_MODE_MASK 0xffff0000
+# define RADEON_B3MEM_RESET_MASK 0x6fffffff
+# define RADEON_MEM_CFG_TYPE_DDR (1 << 30)
+#define RADEON_MEM_STR_CNTL 0x0150
+# define RADEON_MEM_PWRUP_COMPL_A (1 << 0)
+# define RADEON_MEM_PWRUP_COMPL_B (1 << 1)
+# define R300_MEM_PWRUP_COMPL_C (1 << 2)
+# define R300_MEM_PWRUP_COMPL_D (1 << 3)
+# define RADEON_MEM_PWRUP_COMPLETE 0x03
+# define R300_MEM_PWRUP_COMPLETE 0x0f
+#define RADEON_MC_STATUS 0x0150
+# define RADEON_MC_IDLE (1 << 2)
+# define R300_MC_IDLE (1 << 4)
+#define RADEON_MEM_VGA_RP_SEL 0x003c
+#define RADEON_MEM_VGA_WP_SEL 0x0038
+#define RADEON_MIN_GRANT 0x0f3e /* PCI */
+#define RADEON_MM_DATA 0x0004
+#define RADEON_MM_INDEX 0x0000
+# define RADEON_MM_APER (1 << 31)
+#define RADEON_MPLL_CNTL 0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1 0x01c0
+# define RADEON_SCK_PRESCALE_SHIFT 24
+# define RADEON_SCK_PRESCALE_MASK (0xff << 24)
+#define R300_MC_IND_INDEX 0x01f8
+# define R300_MC_IND_ADDR_MASK 0x3f
+# define R300_MC_IND_WR_EN (1 << 8)
+#define R300_MC_IND_DATA 0x01fc
+#define R300_MC_READ_CNTL_AB 0x017c
+# define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind 0x24
+# define R300_MEM_RBS_POSITION_C_MASK 0x03
+
+#define RADEON_N_VIF_COUNT 0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010
+# define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040
+# define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300
+# define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000
+# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000
+# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000
+# define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL 0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408
+# define RADEON_EXCL_HORZ_START_MASK 0x000000ff
+# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00
+# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000
+# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT 0x040C
+# define RADEON_EXCL_VERT_START_MASK 0x000003ff
+# define RADEON_EXCL_VERT_END_MASK 0x03ff0000
+#define RADEON_OV0_FILTER_CNTL 0x04A0
+# define RADEON_FILTER_PROGRAMMABLE_COEF 0x0
+# define RADEON_FILTER_HC_COEF_HORZ_Y 0x1
+# define RADEON_FILTER_HC_COEF_HORZ_UV 0x2
+# define RADEON_FILTER_HC_COEF_VERT_Y 0x4
+# define RADEON_FILTER_HC_COEF_VERT_UV 0x8
+# define RADEON_FILTER_HARDCODED_COEF 0xf
+# define RADEON_FILTER_COEF_MASK 0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0
+#define RADEON_OV0_FLAG_CNTL 0x04DC
+#define RADEON_OV0_GAMMA_000_00F 0x0d40
+#define RADEON_OV0_GAMMA_010_01F 0x0d44
+#define RADEON_OV0_GAMMA_020_03F 0x0d48
+#define RADEON_OV0_GAMMA_040_07F 0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF 0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04
+#define RADEON_OV0_GAMMA_100_13F 0x0e08
+#define RADEON_OV0_GAMMA_140_17F 0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF 0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14
+#define RADEON_OV0_GAMMA_200_23F 0x0e18
+#define RADEON_OV0_GAMMA_240_27F 0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF 0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24
+#define RADEON_OV0_GAMMA_300_33F 0x0e28
+#define RADEON_OV0_GAMMA_340_37F 0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF 0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0
+#define RADEON_OV0_H_INC 0x0480
+#define RADEON_OV0_KEY_CNTL 0x04F4
+# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L
+# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L
+# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L
+# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L
+# define RADEON_VIDEO_KEY_FN_NE 0x00000003L
+# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L
+# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+# define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L
+# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L
+# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L
+# define RADEON_CMP_MIX_MASK 0x00000100L
+# define RADEON_CMP_MIX_OR 0x00000000L
+# define RADEON_CMP_MIX_AND 0x00000100L
+#define RADEON_OV0_LIN_TRANS_A 0x0d20
+#define RADEON_OV0_LIN_TRANS_B 0x0d24
+#define RADEON_OV0_LIN_TRANS_C 0x0d28
+#define RADEON_OV0_LIN_TRANS_D 0x0d2c
+#define RADEON_OV0_LIN_TRANS_E 0x0d30
+#define RADEON_OV0_LIN_TRANS_F 0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430
+# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL
+# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428
+# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L
+#define RADEON_OV0_P1_X_START_END 0x0494
+#define RADEON_OV0_P2_X_START_END 0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434
+# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL
+# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C
+#define RADEON_OV0_P3_X_START_END 0x049C
+#define RADEON_OV0_REG_LOAD_CNTL 0x0410
+# define RADEON_REG_LD_CTL_LOCK 0x00000001L
+# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L
+# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L
+# define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L
+#define RADEON_OV0_SCALE_CNTL 0x0420
+# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L
+# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L
+# define RADEON_SCALER_SIGNED_UV 0x00000010L
+# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L
+# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L
+# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L
+# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L
+# define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L
+# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L
+# define RADEON_SCALER_SOURCE_15BPP 0x00000300L
+# define RADEON_SCALER_SOURCE_16BPP 0x00000400L
+# define RADEON_SCALER_SOURCE_32BPP 0x00000600L
+# define RADEON_SCALER_SOURCE_YUV9 0x00000900L
+# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L
+# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L
+# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L
+# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
+# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L
+# define RADEON_SCALER_CRTC_SEL 0x00004000L
+# define RADEON_SCALER_SMART_SWITCH 0x00008000L
+# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L
+# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L
+# define RADEON_SCALER_DIS_LIMIT 0x08000000L
+# define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L
+# define RADEON_SCALER_INT_EMU 0x20000000L
+# define RADEON_SCALER_ENABLE 0x40000000L
+# define RADEON_SCALER_SOFT_RESET 0x80000000L
+#define RADEON_OV0_STEP_BY 0x0484
+#define RADEON_OV0_TEST 0x04F8
+#define RADEON_OV0_V_INC 0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440
+# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444
+# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448
+# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L
+# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L
+# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L
+# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4
+#define RADEON_OV0_Y_X_START 0x0400
+#define RADEON_OV0_Y_X_END 0x0404
+#define RADEON_OV1_Y_X_START 0x0600
+#define RADEON_OV1_Y_X_END 0x0604
+#define RADEON_OVR_CLR 0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+#define RADEON_OVR2_CLR 0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
+
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET 0x0920
+#define RADEON_CAP0_BUF1_OFFSET 0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C
+
+#define RADEON_CAP0_BUF_PITCH 0x0930
+#define RADEON_CAP0_V_WINDOW 0x0934
+#define RADEON_CAP0_H_WINDOW 0x0938
+#define RADEON_CAP0_VBI0_OFFSET 0x093C
+#define RADEON_CAP0_VBI1_OFFSET 0x0940
+#define RADEON_CAP0_VBI_V_WINDOW 0x0944
+#define RADEON_CAP0_VBI_H_WINDOW 0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL 0x094C
+#define RADEON_CAP0_TRIG_CNTL 0x0950
+#define RADEON_CAP0_DEBUG 0x0954
+#define RADEON_CAP0_CONFIG 0x0958
+# define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001
+# define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002
+# define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004
+# define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008
+# define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010
+# define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020
+# define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+# define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080
+# define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100
+# define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200
+# define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400
+# define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800
+# define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000
+# define RADEON_CAP0_CONFIG_VBI_EN 0x00002000
+# define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000
+# define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+# define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000
+# define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 0x00020000
+# define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000
+# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000
+# define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000
+# define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000
+# define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000
+# define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000
+# define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000
+# define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000
+# define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000
+# define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000
+# define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000
+# define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000
+# define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000
+# define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET 0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960
+#define RADEON_CAP0_ANC_H_WINDOW 0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C
+#define RADEON_CAP0_BUF_STATUS 0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS 0x097C */
+#define RADEON_CAP0_VBI2_OFFSET 0x0980
+#define RADEON_CAP0_VBI3_OFFSET 0x0984
+#define RADEON_CAP0_ANC2_OFFSET 0x0988
+#define RADEON_CAP0_ANC3_OFFSET 0x098C
+#define RADEON_VID_BUFFER_CONTROL 0x0900
+
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET 0x0990
+#define RADEON_CAP1_BUF1_OFFSET 0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C
+
+#define RADEON_CAP1_BUF_PITCH 0x09A0
+#define RADEON_CAP1_V_WINDOW 0x09A4
+#define RADEON_CAP1_H_WINDOW 0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW 0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW 0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL 0x09BC
+#define RADEON_CAP1_TRIG_CNTL 0x09C0
+#define RADEON_CAP1_DEBUG 0x09C4
+#define RADEON_CAP1_CONFIG 0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW 0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC
+#define RADEON_CAP1_BUF_STATUS 0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO 0x09E8
+#define RADEON_CAP1_XSHARPNESS 0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS 0x1F80
+#define RADEON_IDCT_LEVELS 0x1F84
+#define RADEON_IDCT_CONTROL 0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL 0x1F88
+#define RADEON_IDCT_AUTH 0x1F8C
+
+#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */
+# define RADEON_P2PLL_RESET (1 << 0)
+# define RADEON_P2PLL_SLEEP (1 << 1)
+# define RADEON_P2PLL_PVG_MASK (7 << 11)
+# define RADEON_P2PLL_PVG_SHIFT 11
+# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_P2PLL_DIV_0 0x002c
+# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff
+# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000
+#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */
+# define RADEON_P2PLL_REF_DIV_MASK 0x03ff
+# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+# define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18)
+# define R300_PPLL_REF_DIV_ACC_SHIFT 18
+#define RADEON_PALETTE_DATA 0x00b4
+#define RADEON_PALETTE_30_DATA 0x00b8
+#define RADEON_PALETTE_INDEX 0x00b0
+#define RADEON_PCI_GART_PAGE 0x017c
+#define RADEON_PIXCLKS_CNTL 0x002d
+# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03
+# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7)
+# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8)
+# define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+# define R300_DVOCLK_ALWAYS_ONb (1 << 10)
+# define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11)
+# define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12)
+# define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+# define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13)
+# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14)
+# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15)
+# define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16)
+# define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17)
+# define R300_P2G2CLK_ALWAYS_ONb (1 << 18)
+# define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19)
+# define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C 0x1d44
+#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */
+# define RADEON_PLL_MASK_READ_B (1 << 9)
+#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */
+#define RADEON_PMI_DATA 0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL 0x0002 /* PLL */
+# define RADEON_PPLL_RESET (1 << 0)
+# define RADEON_PPLL_SLEEP (1 << 1)
+# define RADEON_PPLL_PVG_MASK (7 << 11)
+# define RADEON_PPLL_PVG_SHIFT 11
+# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16)
+# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18)
+#define RADEON_PPLL_DIV_0 0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1 0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2 0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3 0x0007 /* PLL */
+# define RADEON_PPLL_FB3_DIV_MASK 0x07ff
+# define RADEON_PPLL_POST3_DIV_MASK 0x00070000
+#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */
+# define RADEON_PPLL_REF_DIV_MASK 0x03ff
+# define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */
+
+#define RADEON_RBBM_GUICNTL 0x172c
+# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
+# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
+# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
+# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
+#define RADEON_RBBM_SOFT_RESET 0x00f0
+# define RADEON_SOFT_RESET_CP (1 << 0)
+# define RADEON_SOFT_RESET_HI (1 << 1)
+# define RADEON_SOFT_RESET_SE (1 << 2)
+# define RADEON_SOFT_RESET_RE (1 << 3)
+# define RADEON_SOFT_RESET_PP (1 << 4)
+# define RADEON_SOFT_RESET_E2 (1 << 5)
+# define RADEON_SOFT_RESET_RB (1 << 6)
+# define RADEON_SOFT_RESET_HDP (1 << 7)
+#define RADEON_RBBM_STATUS 0x0e40
+# define RADEON_RBBM_FIFOCNT_MASK 0x007f
+# define RADEON_RBBM_ACTIVE (1 << 31)
+#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
+# define RADEON_RB2D_DC_FLUSH (3 << 0)
+# define RADEON_RB2D_DC_FREE (3 << 2)
+# define RADEON_RB2D_DC_FLUSH_ALL 0xf
+# define RADEON_RB2D_DC_BUSY (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE 0x3428
+#define RADEON_DSTCACHE_CTLSTAT 0x1714
+
+#define RADEON_RB3D_ZCACHE_MODE 0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
+# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
+#define RADEON_RB3D_DSTCACHE_MODE 0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C
+# define RADEON_RB3D_DC_FLUSH (3 << 0)
+# define RADEON_RB3D_DC_FREE (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL 0xf
+# define RADEON_RB3D_DC_BUSY (1 << 31)
+
+#define RADEON_REG_BASE 0x0f18 /* PCI */
+#define RADEON_REGPROG_INF 0x0f09 /* PCI */
+#define RADEON_REVISION_ID 0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM 0x164c
+#define RADEON_SC_BOTTOM_RIGHT 0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c
+#define RADEON_SC_LEFT 0x1640
+#define RADEON_SC_RIGHT 0x1644
+#define RADEON_SC_TOP 0x1648
+#define RADEON_SC_TOP_LEFT 0x16ec
+#define RADEON_SC_TOP_LEFT_C 0x1c88
+# define RADEON_SC_SIGN_MASK_LO 0x8000
+# define RADEON_SC_SIGN_MASK_HI 0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */
+# define RADEON_M_SPLL_REF_DIV_SHIFT 0
+# define RADEON_M_SPLL_REF_DIV_MASK 0xff
+# define RADEON_MPLL_FB_DIV_SHIFT 8
+# define RADEON_MPLL_FB_DIV_MASK 0xff
+# define RADEON_SPLL_FB_DIV_SHIFT 16
+# define RADEON_SPLL_FB_DIV_MASK 0xff
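+/* Illustrative sketch, not part of the original header: the reference,
+ * MPLL and SPLL feedback dividers share one register, so each field is
+ * shifted down before the unshifted mask is applied.  Helper name is
+ * hypothetical; u32 is assumed from the kernel types.
+ */
+static inline u32 radeon_spll_fb_div(u32 m_spll_ref_fb_div)
+{
+	return (m_spll_ref_fb_div >> RADEON_SPLL_FB_DIV_SHIFT) &
+	       RADEON_SPLL_FB_DIV_MASK;
+}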
+#define RADEON_SPLL_CNTL 0x000c /* PLL */
+# define RADEON_SPLL_SLEEP (1 << 0)
+# define RADEON_SPLL_RESET (1 << 1)
+# define RADEON_SPLL_PCP_MASK 0x7
+# define RADEON_SPLL_PCP_SHIFT 8
+# define RADEON_SPLL_PVG_MASK 0x7
+# define RADEON_SPLL_PVG_SHIFT 11
+# define RADEON_SPLL_PDC_MASK 0x3
+# define RADEON_SPLL_PDC_SHIFT 14
+#define RADEON_SCLK_CNTL 0x000d /* PLL */
+# define RADEON_SCLK_SRC_SEL_MASK 0x0007
+# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8
+# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008
+# define RADEON_SCLK_FORCEON_MASK 0xffff8000
+# define RADEON_SCLK_FORCE_DISP2 (1<<15)
+# define RADEON_SCLK_FORCE_CP (1<<16)
+# define RADEON_SCLK_FORCE_HDP (1<<17)
+# define RADEON_SCLK_FORCE_DISP1 (1<<18)
+# define RADEON_SCLK_FORCE_TOP (1<<19)
+# define RADEON_SCLK_FORCE_E2 (1<<20)
+# define RADEON_SCLK_FORCE_SE (1<<21)
+# define RADEON_SCLK_FORCE_IDCT (1<<22)
+# define RADEON_SCLK_FORCE_VIP (1<<23)
+# define RADEON_SCLK_FORCE_RE (1<<24)
+# define RADEON_SCLK_FORCE_PB (1<<25)
+# define RADEON_SCLK_FORCE_TAM (1<<26)
+# define RADEON_SCLK_FORCE_TDM (1<<27)
+# define RADEON_SCLK_FORCE_RB (1<<28)
+# define RADEON_SCLK_FORCE_TV_SCLK (1<<29)
+# define RADEON_SCLK_FORCE_SUBPIC (1<<30)
+# define RADEON_SCLK_FORCE_OV0 (1<<31)
+# define R300_SCLK_FORCE_VAP (1<<21)
+# define R300_SCLK_FORCE_SR (1<<25)
+# define R300_SCLK_FORCE_PX (1<<26)
+# define R300_SCLK_FORCE_TX (1<<27)
+# define R300_SCLK_FORCE_US (1<<28)
+# define R300_SCLK_FORCE_SU (1<<30)
+#define R300_SCLK_CNTL2 0x1e /* PLL */
+# define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+# define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11)
+# define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+# define R300_SCLK_FORCE_TCL (1<<13)
+# define R300_SCLK_FORCE_CBA (1<<14)
+# define R300_SCLK_FORCE_GA (1<<15)
+#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */
+# define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+# define RADEON_SCLK_MORE_FORCEON 0x0700
+#define RADEON_SDRAM_MODE_REG 0x0158
+#define RADEON_SEQ8_DATA 0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX 0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT 0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS 0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT 0x024c
+#define RADEON_SRC_OFFSET 0x15ac
+#define RADEON_SRC_PITCH 0x15b0
+#define RADEON_SRC_PITCH_OFFSET 0x1428
+#define RADEON_SRC_SC_BOTTOM 0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4
+#define RADEON_SRC_SC_RIGHT 0x1654
+#define RADEON_SRC_X 0x1414
+#define RADEON_SRC_X_Y 0x1590
+#define RADEON_SRC_Y 0x1418
+#define RADEON_SRC_Y_X 0x1434
+#define RADEON_STATUS 0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL 0x0540 /* ? */
+#define RADEON_SUB_CLASS 0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL 0x0b00
+# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
+# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+# define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+# define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO 0x0b0c
+# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16)
+# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+# define R200_SURF_TILE_NONE (0 << 16)
+# define R200_SURF_TILE_COLOR_MACRO (1 << 16)
+# define R200_SURF_TILE_COLOR_MICRO (2 << 16)
+# define R200_SURF_TILE_COLOR_BOTH (3 << 16)
+# define R200_SURF_TILE_DEPTH_32BPP (4 << 16)
+# define R200_SURF_TILE_DEPTH_16BPP (5 << 16)
+# define R300_SURF_TILE_NONE (0 << 16)
+# define R300_SURF_TILE_COLOR_MACRO (1 << 16)
+# define R300_SURF_TILE_DEPTH_32BPP (2 << 16)
+# define RADEON_SURF_AP0_SWP_16BPP (1 << 20)
+# define RADEON_SURF_AP0_SWP_32BPP (1 << 21)
+# define RADEON_SURF_AP1_SWP_16BPP (1 << 22)
+# define RADEON_SURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+#define RADEON_SURFACE1_INFO 0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
+#define RADEON_SURFACE2_INFO 0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
+#define RADEON_SURFACE3_INFO 0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
+#define RADEON_SURFACE4_INFO 0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
+#define RADEON_SURFACE5_INFO 0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
+#define RADEON_SURFACE6_INFO 0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
+#define RADEON_SURFACE7_INFO 0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
+#define RADEON_SW_SEMAPHORE 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL 0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX 0x0124
+#define RADEON_TEST_DEBUG_OUT 0x012c
+#define RADEON_TMDS_PLL_CNTL 0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4
+# define RADEON_TMDS_TRANSMITTER_PLLEN 1
+# define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC 0x1614
+#define RADEON_TRAIL_BRES_ERR 0x160c
+#define RADEON_TRAIL_BRES_INC 0x1610
+#define RADEON_TRAIL_X 0x1618
+#define RADEON_TRAIL_X_SUB 0x1620
+
+#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */
+# define RADEON_VCLK_SRC_SEL_MASK 0x03
+# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00
+# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02
+# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03
+# define RADEON_PIXCLK_ALWAYS_ONb (1<<6)
+# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+# define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID 0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG 0x02e8
+#define RADEON_VGA_DDA_ON_OFF 0x02ec
+#define RADEON_VIDEOMUX_CNTL 0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA 0x0c00
+#define RADEON_VIPH_CH1_DATA 0x0c04
+#define RADEON_VIPH_CH2_DATA 0x0c08
+#define RADEON_VIPH_CH3_DATA 0x0c0c
+#define RADEON_VIPH_CH0_ADDR 0x0c10
+#define RADEON_VIPH_CH1_ADDR 0x0c14
+#define RADEON_VIPH_CH2_ADDR 0x0c18
+#define RADEON_VIPH_CH3_ADDR 0x0c1c
+#define RADEON_VIPH_CH0_SBCNT 0x0c20
+#define RADEON_VIPH_CH1_SBCNT 0x0c24
+#define RADEON_VIPH_CH2_SBCNT 0x0c28
+#define RADEON_VIPH_CH3_SBCNT 0x0c2c
+#define RADEON_VIPH_CH0_ABCNT 0x0c30
+#define RADEON_VIPH_CH1_ABCNT 0x0c34
+#define RADEON_VIPH_CH2_ABCNT 0x0c38
+#define RADEON_VIPH_CH3_ABCNT 0x0c3c
+#define RADEON_VIPH_CONTROL 0x0c40
+# define RADEON_VIP_BUSY 0
+# define RADEON_VIP_IDLE 1
+# define RADEON_VIP_RESET 2
+# define RADEON_VIPH_EN (1 << 21)
+#define RADEON_VIPH_DV_LAT 0x0c44
+#define RADEON_VIPH_BM_CHUNK 0x0c48
+#define RADEON_VIPH_DV_INT 0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT 0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA 0x0084
+#define RADEON_VIPH_REG_ADDR 0x0080
+
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_RE_CRTC_VLINE (1 << 1)
+# define RADEON_WAIT_FE_CRTC_VLINE (1 << 2)
+# define RADEON_WAIT_CRTC_VLINE (1 << 3)
+# define RADEON_WAIT_DMA_VID_IDLE (1 << 8)
+# define RADEON_WAIT_DMA_GUI_IDLE (1 << 9)
+# define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */
+# define RADEON_WAIT_OV0_FLIP (1 << 11)
+# define RADEON_WAIT_AGP_FLUSH (1 << 13)
+# define RADEON_WAIT_2D_IDLE (1 << 14)
+# define RADEON_WAIT_3D_IDLE (1 << 15)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
+# define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+# define RADEON_CMDFIFO_ENTRIES_MASK 0x7f
+# define RADEON_WAIT_VAP_IDLE (1 << 28)
+# define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30)
+# define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0 << 31)
+# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1 << 31)
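+/* Illustrative sketch, not part of the original header: a WAIT_UNTIL
+ * value that stalls the engine until both the 2D and 3D units are idle
+ * and their caches are clean.  Helper name is hypothetical; u32 is
+ * assumed from the kernel types.
+ */
+static inline u32 radeon_wait_until_idleclean(void)
+{
+	return RADEON_WAIT_2D_IDLECLEAN |
+	       RADEON_WAIT_3D_IDLECLEAN |
+	       RADEON_WAIT_HOST_IDLECLEAN;
+}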
+
+#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */
+#define RADEON_XCLK_CNTL 0x000d /* PLL */
+#define RADEON_XDLL_CNTL 0x000c /* PLL */
+#define RADEON_XPLL_CNTL 0x000b /* PLL */
+
+ /* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0 0x1d40
+#define RADEON_PP_BORDER_COLOR_1 0x1d44
+#define RADEON_PP_BORDER_COLOR_2 0x1d48
+#define RADEON_PP_CNTL 0x1c38
+# define RADEON_STIPPLE_ENABLE (1 << 0)
+# define RADEON_SCISSOR_ENABLE (1 << 1)
+# define RADEON_PATTERN_ENABLE (1 << 2)
+# define RADEON_SHADOW_ENABLE (1 << 3)
+# define RADEON_TEX_ENABLE_MASK (0xf << 4)
+# define RADEON_TEX_0_ENABLE (1 << 4)
+# define RADEON_TEX_1_ENABLE (1 << 5)
+# define RADEON_TEX_2_ENABLE (1 << 6)
+# define RADEON_TEX_3_ENABLE (1 << 7)
+# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+# define RADEON_TEX_BLEND_0_ENABLE (1 << 12)
+# define RADEON_TEX_BLEND_1_ENABLE (1 << 13)
+# define RADEON_TEX_BLEND_2_ENABLE (1 << 14)
+# define RADEON_TEX_BLEND_3_ENABLE (1 << 15)
+# define RADEON_PLANAR_YUV_ENABLE (1 << 20)
+# define RADEON_SPECULAR_ENABLE (1 << 21)
+# define RADEON_FOG_ENABLE (1 << 22)
+# define RADEON_ALPHA_TEST_ENABLE (1 << 23)
+# define RADEON_ANTI_ALIAS_NONE (0 << 24)
+# define RADEON_ANTI_ALIAS_LINE (1 << 24)
+# define RADEON_ANTI_ALIAS_POLY (2 << 24)
+# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24)
+# define RADEON_BUMP_MAP_ENABLE (1 << 26)
+# define RADEON_BUMPED_MAP_T0 (0 << 27)
+# define RADEON_BUMPED_MAP_T1 (1 << 27)
+# define RADEON_BUMPED_MAP_T2 (2 << 27)
+# define RADEON_TEX_3D_ENABLE_0 (1 << 29)
+# define RADEON_TEX_3D_ENABLE_1 (1 << 30)
+# define RADEON_MC_ENABLE (1 << 31)
+#define RADEON_PP_FOG_COLOR 0x1c18
+# define RADEON_FOG_COLOR_MASK 0x00ffffff
+# define RADEON_FOG_VERTEX (0 << 24)
+# define RADEON_FOG_TABLE (1 << 24)
+# define RADEON_FOG_USE_DEPTH (0 << 25)
+# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25)
+#define RADEON_PP_LUM_MATRIX 0x1d00
+#define RADEON_PP_MISC 0x1c14
+# define RADEON_REF_ALPHA_MASK 0x000000ff
+# define RADEON_ALPHA_TEST_FAIL (0 << 8)
+# define RADEON_ALPHA_TEST_LESS (1 << 8)
+# define RADEON_ALPHA_TEST_LEQUAL (2 << 8)
+# define RADEON_ALPHA_TEST_EQUAL (3 << 8)
+# define RADEON_ALPHA_TEST_GEQUAL (4 << 8)
+# define RADEON_ALPHA_TEST_GREATER (5 << 8)
+# define RADEON_ALPHA_TEST_NEQUAL (6 << 8)
+# define RADEON_ALPHA_TEST_PASS (7 << 8)
+# define RADEON_ALPHA_TEST_OP_MASK (7 << 8)
+# define RADEON_CHROMA_FUNC_FAIL (0 << 16)
+# define RADEON_CHROMA_FUNC_PASS (1 << 16)
+# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16)
+# define RADEON_CHROMA_FUNC_EQUAL (3 << 16)
+# define RADEON_CHROMA_KEY_NEAREST (0 << 18)
+# define RADEON_CHROMA_KEY_ZERO (1 << 18)
+# define RADEON_SHADOW_ID_AUTO_INC (1 << 20)
+# define RADEON_SHADOW_FUNC_EQUAL (0 << 21)
+# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21)
+# define RADEON_SHADOW_PASS_1 (0 << 22)
+# define RADEON_SHADOW_PASS_2 (1 << 22)
+# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24)
+# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0 0x1d58
+#define RADEON_PP_ROT_MATRIX_1 0x1d5c
+#define RADEON_PP_TXFILTER_0 0x1c54
+#define RADEON_PP_TXFILTER_1 0x1c6c
+#define RADEON_PP_TXFILTER_2 0x1c84
+# define RADEON_MAG_FILTER_NEAREST (0 << 0)
+# define RADEON_MAG_FILTER_LINEAR (1 << 0)
+# define RADEON_MAG_FILTER_MASK (1 << 0)
+# define RADEON_MIN_FILTER_NEAREST (0 << 1)
+# define RADEON_MIN_FILTER_LINEAR (1 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
+# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
+# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1)
+# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
+# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
+# define RADEON_MIN_FILTER_MASK (15 << 1)
+# define RADEON_MAX_ANISO_1_TO_1 (0 << 5)
+# define RADEON_MAX_ANISO_2_TO_1 (1 << 5)
+# define RADEON_MAX_ANISO_4_TO_1 (2 << 5)
+# define RADEON_MAX_ANISO_8_TO_1 (3 << 5)
+# define RADEON_MAX_ANISO_16_TO_1 (4 << 5)
+# define RADEON_MAX_ANISO_MASK (7 << 5)
+# define RADEON_LOD_BIAS_MASK (0xff << 8)
+# define RADEON_LOD_BIAS_SHIFT 8
+# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16)
+# define RADEON_MAX_MIP_LEVEL_SHIFT 16
+# define RADEON_YUV_TO_RGB (1 << 20)
+# define RADEON_YUV_TEMPERATURE_COOL (0 << 21)
+# define RADEON_YUV_TEMPERATURE_HOT (1 << 21)
+# define RADEON_YUV_TEMPERATURE_MASK (1 << 21)
+# define RADEON_WRAPEN_S (1 << 22)
+# define RADEON_CLAMP_S_WRAP (0 << 23)
+# define RADEON_CLAMP_S_MIRROR (1 << 23)
+# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
+# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
+# define RADEON_CLAMP_S_CLAMP_GL (6 << 23)
+# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
+# define RADEON_CLAMP_S_MASK (7 << 23)
+# define RADEON_WRAPEN_T (1 << 26)
+# define RADEON_CLAMP_T_WRAP (0 << 27)
+# define RADEON_CLAMP_T_MIRROR (1 << 27)
+# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
+# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
+# define RADEON_CLAMP_T_CLAMP_GL (6 << 27)
+# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
+# define RADEON_CLAMP_T_MASK (7 << 27)
+# define RADEON_BORDER_MODE_OGL (0 << 31)
+# define RADEON_BORDER_MODE_D3D (1 << 31)
+#define RADEON_PP_TXFORMAT_0 0x1c58
+#define RADEON_PP_TXFORMAT_1 0x1c70
+#define RADEON_PP_TXFORMAT_2 0x1c88
+# define RADEON_TXFORMAT_I8 (0 << 0)
+# define RADEON_TXFORMAT_AI88 (1 << 0)
+# define RADEON_TXFORMAT_RGB332 (2 << 0)
+# define RADEON_TXFORMAT_ARGB1555 (3 << 0)
+# define RADEON_TXFORMAT_RGB565 (4 << 0)
+# define RADEON_TXFORMAT_ARGB4444 (5 << 0)
+# define RADEON_TXFORMAT_ARGB8888 (6 << 0)
+# define RADEON_TXFORMAT_RGBA8888 (7 << 0)
+# define RADEON_TXFORMAT_Y8 (8 << 0)
+# define RADEON_TXFORMAT_VYUY422 (10 << 0)
+# define RADEON_TXFORMAT_YVYU422 (11 << 0)
+# define RADEON_TXFORMAT_DXT1 (12 << 0)
+# define RADEON_TXFORMAT_DXT23 (14 << 0)
+# define RADEON_TXFORMAT_DXT45 (15 << 0)
+# define RADEON_TXFORMAT_SHADOW16 (16 << 0)
+# define RADEON_TXFORMAT_SHADOW32 (17 << 0)
+# define RADEON_TXFORMAT_DUDV88 (18 << 0)
+# define RADEON_TXFORMAT_LDUDV655 (19 << 0)
+# define RADEON_TXFORMAT_LDUDUV8888 (20 << 0)
+# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
+# define RADEON_TXFORMAT_FORMAT_SHIFT 0
+# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
+# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6)
+# define RADEON_TXFORMAT_NON_POWER2 (1 << 7)
+# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8)
+# define RADEON_TXFORMAT_WIDTH_SHIFT 8
+# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12)
+# define RADEON_TXFORMAT_HEIGHT_SHIFT 12
+# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16)
+# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16
+# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
+# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20
+# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
+# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
+# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26)
+# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26)
+# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26)
+# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26)
+# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
+# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31)
+#define RADEON_PP_CUBIC_FACES_0 0x1d24
+#define RADEON_PP_CUBIC_FACES_1 0x1d28
+#define RADEON_PP_CUBIC_FACES_2 0x1d2c
+# define RADEON_FACE_WIDTH_1_SHIFT 0
+# define RADEON_FACE_HEIGHT_1_SHIFT 4
+# define RADEON_FACE_WIDTH_1_MASK (0xf << 0)
+# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4)
+# define RADEON_FACE_WIDTH_2_SHIFT 8
+# define RADEON_FACE_HEIGHT_2_SHIFT 12
+# define RADEON_FACE_WIDTH_2_MASK (0xf << 8)
+# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12)
+# define RADEON_FACE_WIDTH_3_SHIFT 16
+# define RADEON_FACE_HEIGHT_3_SHIFT 20
+# define RADEON_FACE_WIDTH_3_MASK (0xf << 16)
+# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20)
+# define RADEON_FACE_WIDTH_4_SHIFT 24
+# define RADEON_FACE_HEIGHT_4_SHIFT 28
+# define RADEON_FACE_WIDTH_4_MASK (0xf << 24)
+# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0 0x1c5c
+#define RADEON_PP_TXOFFSET_1 0x1c74
+#define RADEON_PP_TXOFFSET_2 0x1c8c
+# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define RADEON_TXO_MACRO_LINEAR (0 << 2)
+# define RADEON_TXO_MACRO_TILE (1 << 2)
+# define RADEON_TXO_MICRO_LINEAR (0 << 3)
+# define RADEON_TXO_MICRO_TILE_X2 (1 << 3)
+# define RADEON_TXO_MICRO_TILE_OPT (2 << 3)
+# define RADEON_TXO_OFFSET_MASK 0xffffffe0
+# define RADEON_TXO_OFFSET_SHIFT 5
+
+#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24
+
+#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
+#define RADEON_PP_TEX_SIZE_1 0x1d0c
+#define RADEON_PP_TEX_SIZE_2 0x1d14
+# define RADEON_TEX_USIZE_MASK (0x7ff << 0)
+# define RADEON_TEX_USIZE_SHIFT 0
+# define RADEON_TEX_VSIZE_MASK (0x7ff << 16)
+# define RADEON_TEX_VSIZE_SHIFT 16
+# define RADEON_SIGNED_RGB_MASK (1 << 30)
+# define RADEON_SIGNED_RGB_SHIFT 30
+# define RADEON_SIGNED_ALPHA_MASK (1 << 31)
+# define RADEON_SIGNED_ALPHA_SHIFT 31
+#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */
+#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */
+#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */
+/* note: bits 13-5: 32 byte aligned stride of texture map */
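+/* Illustrative sketch, not part of the original header: per the note
+ * above, a texture stride is rounded up to a 32-byte multiple before
+ * being placed in bits 13:5.  Helper name is hypothetical; u32 is
+ * assumed from the kernel types.
+ */
+static inline u32 radeon_tex_pitch_align(u32 stride_bytes)
+{
+	return (stride_bytes + 31) & ~(u32)31;	/* 32-byte aligned */
+}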
+
+#define RADEON_PP_TXCBLEND_0 0x1c60
+#define RADEON_PP_TXCBLEND_1 0x1c78
+#define RADEON_PP_TXCBLEND_2 0x1c90
+# define RADEON_COLOR_ARG_A_SHIFT 0
+# define RADEON_COLOR_ARG_A_MASK (0x1f << 0)
+# define RADEON_COLOR_ARG_A_ZERO (0 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0)
+# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0)
+# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0)
+# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0)
+# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0)
+# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0)
+# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0)
+# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0)
+# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0)
+# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0)
+# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0)
+# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0)
+# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0)
+# define RADEON_COLOR_ARG_B_SHIFT 5
+# define RADEON_COLOR_ARG_B_MASK (0x1f << 5)
+# define RADEON_COLOR_ARG_B_ZERO (0 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5)
+# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5)
+# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5)
+# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5)
+# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5)
+# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5)
+# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5)
+# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5)
+# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5)
+# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5)
+# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5)
+# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5)
+# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5)
+# define RADEON_COLOR_ARG_C_SHIFT 10
+# define RADEON_COLOR_ARG_C_MASK (0x1f << 10)
+# define RADEON_COLOR_ARG_C_ZERO (0 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10)
+# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10)
+# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10)
+# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10)
+# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10)
+# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10)
+# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10)
+# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10)
+# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10)
+# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10)
+# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10)
+# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10)
+# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10)
+# define RADEON_COMP_ARG_A (1 << 15)
+# define RADEON_COMP_ARG_A_SHIFT 15
+# define RADEON_COMP_ARG_B (1 << 16)
+# define RADEON_COMP_ARG_B_SHIFT 16
+# define RADEON_COMP_ARG_C (1 << 17)
+# define RADEON_COMP_ARG_C_SHIFT 17
+# define RADEON_BLEND_CTL_MASK (7 << 18)
+# define RADEON_BLEND_CTL_ADD (0 << 18)
+# define RADEON_BLEND_CTL_SUBTRACT (1 << 18)
+# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18)
+# define RADEON_BLEND_CTL_BLEND (3 << 18)
+# define RADEON_BLEND_CTL_DOT3 (4 << 18)
+# define RADEON_SCALE_SHIFT 21
+# define RADEON_SCALE_MASK (3 << 21)
+# define RADEON_SCALE_1X (0 << 21)
+# define RADEON_SCALE_2X (1 << 21)
+# define RADEON_SCALE_4X (2 << 21)
+# define RADEON_CLAMP_TX (1 << 23)
+# define RADEON_T0_EQ_TCUR (1 << 24)
+# define RADEON_T1_EQ_TCUR (1 << 25)
+# define RADEON_T2_EQ_TCUR (1 << 26)
+# define RADEON_T3_EQ_TCUR (1 << 27)
+# define RADEON_COLOR_ARG_MASK 0x1f
+# define RADEON_COMP_ARG_SHIFT 15
+#define RADEON_PP_TXABLEND_0 0x1c64
+#define RADEON_PP_TXABLEND_1 0x1c7c
+#define RADEON_PP_TXABLEND_2 0x1c94
+# define RADEON_ALPHA_ARG_A_SHIFT 0
+# define RADEON_ALPHA_ARG_A_MASK (0xf << 0)
+# define RADEON_ALPHA_ARG_A_ZERO (0 << 0)
+# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0)
+# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0)
+# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0)
+# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0)
+# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0)
+# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0)
+# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0)
+# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0)
+# define RADEON_ALPHA_ARG_B_SHIFT 4
+# define RADEON_ALPHA_ARG_B_MASK (0xf << 4)
+# define RADEON_ALPHA_ARG_B_ZERO (0 << 4)
+# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4)
+# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4)
+# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4)
+# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4)
+# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4)
+# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4)
+# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4)
+# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4)
+# define RADEON_ALPHA_ARG_C_SHIFT 8
+# define RADEON_ALPHA_ARG_C_MASK (0xf << 8)
+# define RADEON_ALPHA_ARG_C_ZERO (0 << 8)
+# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8)
+# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8)
+# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8)
+# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8)
+# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8)
+# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8)
+# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8)
+# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8)
+# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9)
+# define RADEON_ALPHA_ARG_MASK 0xf
+
+#define RADEON_PP_TFACTOR_0 0x1c68
+#define RADEON_PP_TFACTOR_1 0x1c80
+#define RADEON_PP_TFACTOR_2 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL 0x1c20
+# define RADEON_COMB_FCN_MASK (3 << 12)
+# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12)
+# define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12)
+# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12)
+# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12)
+# define RADEON_SRC_BLEND_GL_ZERO (32 << 16)
+# define RADEON_SRC_BLEND_GL_ONE (33 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16)
+# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
+# define RADEON_SRC_BLEND_MASK (63 << 16)
+# define RADEON_DST_BLEND_GL_ZERO (32 << 24)
+# define RADEON_DST_BLEND_GL_ONE (33 << 24)
+# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24)
+# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+# define RADEON_DST_BLEND_MASK (63 << 24)
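+/*
+ * Illustrative sketch, not part of the original header: a classic GL-style
+ * alpha blend composed from the RB3D_BLENDCNTL fields above.  write_reg()
+ * stands in for the driver's real MMIO accessor.
+ *
+ *	u32 blend = RADEON_COMB_FCN_ADD_CLAMP
+ *		  | RADEON_SRC_BLEND_GL_SRC_ALPHA
+ *		  | RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
+ *	write_reg(RADEON_RB3D_BLENDCNTL, blend);
+ */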
+#define RADEON_RB3D_CNTL 0x1c3c
+# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
+# define RADEON_PLANE_MASK_ENABLE (1 << 1)
+# define RADEON_DITHER_ENABLE (1 << 2)
+# define RADEON_ROUND_ENABLE (1 << 3)
+# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
+# define RADEON_DITHER_INIT (1 << 5)
+# define RADEON_ROP_ENABLE (1 << 6)
+# define RADEON_STENCIL_ENABLE (1 << 7)
+# define RADEON_Z_ENABLE (1 << 8)
+# define RADEON_DEPTHXY_OFFSET_ENABLE (1 << 9)
+# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
+
+# define RADEON_COLOR_FORMAT_ARGB1555 3
+# define RADEON_COLOR_FORMAT_RGB565 4
+# define RADEON_COLOR_FORMAT_ARGB8888 6
+# define RADEON_COLOR_FORMAT_RGB332 7
+# define RADEON_COLOR_FORMAT_Y8 8
+# define RADEON_COLOR_FORMAT_RGB8 9
+# define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+# define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+# define RADEON_COLOR_FORMAT_aYUV444 14
+# define RADEON_COLOR_FORMAT_ARGB4444 15
+
+# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
+#define RADEON_RB3D_COLOROFFSET 0x1c40
+# define RADEON_COLOROFFSET_MASK 0xfffffff0
+#define RADEON_RB3D_COLORPITCH 0x1c48
+# define RADEON_COLORPITCH_MASK           0x00001ff8
+# define RADEON_COLOR_TILE_ENABLE (1 << 16)
+# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17)
+# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET 0x1c24
+#define RADEON_RB3D_DEPTHPITCH 0x1c28
+# define RADEON_DEPTHPITCH_MASK 0x00001ff8
+# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18)
+# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18)
+# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK 0x1d84
+#define RADEON_RB3D_ROPCNTL 0x1d80
+# define RADEON_ROP_MASK (15 << 8)
+# define RADEON_ROP_CLEAR (0 << 8)
+# define RADEON_ROP_NOR (1 << 8)
+# define RADEON_ROP_AND_INVERTED (2 << 8)
+# define RADEON_ROP_COPY_INVERTED (3 << 8)
+# define RADEON_ROP_AND_REVERSE (4 << 8)
+# define RADEON_ROP_INVERT (5 << 8)
+# define RADEON_ROP_XOR (6 << 8)
+# define RADEON_ROP_NAND (7 << 8)
+# define RADEON_ROP_AND (8 << 8)
+# define RADEON_ROP_EQUIV (9 << 8)
+# define RADEON_ROP_NOOP (10 << 8)
+# define RADEON_ROP_OR_INVERTED (11 << 8)
+# define RADEON_ROP_COPY (12 << 8)
+# define RADEON_ROP_OR_REVERSE (13 << 8)
+# define RADEON_ROP_OR (14 << 8)
+# define RADEON_ROP_SET (15 << 8)
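+/*
+ * Illustrative sketch: selecting a raster operation is a read-modify-write
+ * of the 4-bit ROP field above.  read_reg()/write_reg() are hypothetical
+ * MMIO helpers.
+ *
+ *	u32 rop = read_reg(RADEON_RB3D_ROPCNTL) & ~RADEON_ROP_MASK;
+ *	write_reg(RADEON_RB3D_ROPCNTL, rop | RADEON_ROP_XOR);
+ */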
+#define RADEON_RB3D_STENCILREFMASK 0x1d7c
+# define RADEON_STENCIL_REF_SHIFT 0
+# define RADEON_STENCIL_REF_MASK (0xff << 0)
+# define RADEON_STENCIL_MASK_SHIFT 16
+# define RADEON_STENCIL_VALUE_MASK (0xff << 16)
+# define RADEON_STENCIL_WRITEMASK_SHIFT 24
+# define RADEON_STENCIL_WRITE_MASK (0xff << 24)
+#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
+# define RADEON_DEPTH_FORMAT_MASK (0xf << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0)
+# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0)
+# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0)
+# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0)
+# define RADEON_Z_TEST_NEVER (0 << 4)
+# define RADEON_Z_TEST_LESS (1 << 4)
+# define RADEON_Z_TEST_LEQUAL (2 << 4)
+# define RADEON_Z_TEST_EQUAL (3 << 4)
+# define RADEON_Z_TEST_GEQUAL (4 << 4)
+# define RADEON_Z_TEST_GREATER (5 << 4)
+# define RADEON_Z_TEST_NEQUAL (6 << 4)
+# define RADEON_Z_TEST_ALWAYS (7 << 4)
+# define RADEON_Z_TEST_MASK (7 << 4)
+# define RADEON_STENCIL_TEST_NEVER (0 << 12)
+# define RADEON_STENCIL_TEST_LESS (1 << 12)
+# define RADEON_STENCIL_TEST_LEQUAL (2 << 12)
+# define RADEON_STENCIL_TEST_EQUAL (3 << 12)
+# define RADEON_STENCIL_TEST_GEQUAL (4 << 12)
+# define RADEON_STENCIL_TEST_GREATER (5 << 12)
+# define RADEON_STENCIL_TEST_NEQUAL (6 << 12)
+# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
+# define RADEON_STENCIL_TEST_MASK (0x7 << 12)
+# define RADEON_STENCIL_FAIL_KEEP (0 << 16)
+# define RADEON_STENCIL_FAIL_ZERO (1 << 16)
+# define RADEON_STENCIL_FAIL_REPLACE (2 << 16)
+# define RADEON_STENCIL_FAIL_INC (3 << 16)
+# define RADEON_STENCIL_FAIL_DEC (4 << 16)
+# define RADEON_STENCIL_FAIL_INVERT (5 << 16)
+# define RADEON_STENCIL_FAIL_MASK (0x7 << 16)
+# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
+# define RADEON_STENCIL_ZPASS_ZERO (1 << 20)
+# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
+# define RADEON_STENCIL_ZPASS_INC (3 << 20)
+# define RADEON_STENCIL_ZPASS_DEC (4 << 20)
+# define RADEON_STENCIL_ZPASS_INVERT (5 << 20)
+# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20)
+# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24)
+# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24)
+# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
+# define RADEON_STENCIL_ZFAIL_INC (3 << 24)
+# define RADEON_STENCIL_ZFAIL_DEC (4 << 24)
+# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24)
+# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24)
+# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
+# define RADEON_FORCE_Z_DIRTY (1 << 29)
+# define RADEON_Z_WRITE_ENABLE (1 << 30)
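+/*
+ * Illustrative sketch: a depth-buffered, stencil-replace-on-pass setup
+ * composed from the ZSTENCILCNTL fields above.  Note that the Z and
+ * stencil enables themselves live in RB3D_CNTL; write_reg() is a
+ * hypothetical helper.
+ *
+ *	u32 zs = RADEON_DEPTH_FORMAT_24BIT_INT_Z
+ *	       | RADEON_Z_TEST_LEQUAL
+ *	       | RADEON_STENCIL_TEST_ALWAYS
+ *	       | RADEON_STENCIL_FAIL_KEEP
+ *	       | RADEON_STENCIL_ZPASS_REPLACE
+ *	       | RADEON_STENCIL_ZFAIL_KEEP
+ *	       | RADEON_Z_WRITE_ENABLE;
+ *	write_reg(RADEON_RB3D_ZSTENCILCNTL, zs);
+ */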
+#define RADEON_RE_LINE_PATTERN 0x1cd0
+# define RADEON_LINE_PATTERN_MASK 0x0000ffff
+# define RADEON_LINE_REPEAT_COUNT_SHIFT 16
+# define RADEON_LINE_PATTERN_START_SHIFT 24
+# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28)
+# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29)
+#define RADEON_RE_LINE_STATE 0x1cd4
+# define RADEON_LINE_CURRENT_PTR_SHIFT 0
+# define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC 0x26c4
+# define RADEON_STIPPLE_COORD_MASK 0x1f
+# define RADEON_STIPPLE_X_OFFSET_SHIFT 0
+# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0)
+# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8
+# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8)
+# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16)
+#define RADEON_RE_SOLID_COLOR 0x1c1c
+#define RADEON_RE_TOP_LEFT 0x26c0
+# define RADEON_RE_LEFT_SHIFT 0
+# define RADEON_RE_TOP_SHIFT 16
+#define RADEON_RE_WIDTH_HEIGHT 0x1c44
+# define RADEON_RE_WIDTH_SHIFT 0
+# define RADEON_RE_HEIGHT_SHIFT 16
+
+#define RADEON_RB3D_ZPASS_DATA 0x3290
+#define RADEON_RB3D_ZPASS_ADDR 0x3294
+
+#define RADEON_SE_CNTL 0x1c4c
+# define RADEON_FFACE_CULL_CW (0 << 0)
+# define RADEON_FFACE_CULL_CCW (1 << 0)
+# define RADEON_FFACE_CULL_DIR_MASK (1 << 0)
+# define RADEON_BFACE_CULL (0 << 1)
+# define RADEON_BFACE_SOLID (3 << 1)
+# define RADEON_FFACE_CULL (0 << 3)
+# define RADEON_FFACE_SOLID (3 << 3)
+# define RADEON_FFACE_CULL_MASK (3 << 3)
+# define RADEON_BADVTX_CULL_DISABLE (1 << 5)
+# define RADEON_FLAT_SHADE_VTX_0 (0 << 6)
+# define RADEON_FLAT_SHADE_VTX_1 (1 << 6)
+# define RADEON_FLAT_SHADE_VTX_2 (2 << 6)
+# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
+# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8)
+# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
+# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
+# define RADEON_DIFFUSE_SHADE_MASK (3 << 8)
+# define RADEON_ALPHA_SHADE_SOLID (0 << 10)
+# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
+# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
+# define RADEON_ALPHA_SHADE_MASK (3 << 10)
+# define RADEON_SPECULAR_SHADE_SOLID (0 << 12)
+# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
+# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+# define RADEON_SPECULAR_SHADE_MASK (3 << 12)
+# define RADEON_FOG_SHADE_SOLID (0 << 14)
+# define RADEON_FOG_SHADE_FLAT (1 << 14)
+# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
+# define RADEON_FOG_SHADE_MASK (3 << 14)
+# define RADEON_ZBIAS_ENABLE_POINT (1 << 16)
+# define RADEON_ZBIAS_ENABLE_LINE (1 << 17)
+# define RADEON_ZBIAS_ENABLE_TRI (1 << 18)
+# define RADEON_WIDELINE_ENABLE (1 << 20)
+# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
+# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
+# define RADEON_VTX_PIX_CENTER_D3D (0 << 27)
+# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
+# define RADEON_ROUND_MODE_TRUNC (0 << 28)
+# define RADEON_ROUND_MODE_ROUND (1 << 28)
+# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28)
+# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28)
+# define RADEON_ROUND_PREC_16TH_PIX (0 << 30)
+# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
+# define RADEON_ROUND_PREC_4TH_PIX (2 << 30)
+# define RADEON_ROUND_PREC_HALF_PIX (3 << 30)
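+/*
+ * Illustrative sketch: an OpenGL-flavored SE_CNTL value (CCW front faces,
+ * filled front and back faces, Gouraud shading, GL pixel centers).
+ * write_reg() is a hypothetical helper.
+ *
+ *	u32 se = RADEON_FFACE_CULL_CCW
+ *	       | RADEON_BFACE_SOLID
+ *	       | RADEON_FFACE_SOLID
+ *	       | RADEON_DIFFUSE_SHADE_GOURAUD
+ *	       | RADEON_ALPHA_SHADE_GOURAUD
+ *	       | RADEON_SPECULAR_SHADE_GOURAUD
+ *	       | RADEON_VTX_PIX_CENTER_OGL;
+ *	write_reg(RADEON_SE_CNTL, se);
+ */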
+#define R200_RE_CNTL 0x1c50
+# define R200_STIPPLE_ENABLE 0x1
+# define R200_SCISSOR_ENABLE 0x2
+# define R200_PATTERN_ENABLE 0x4
+# define R200_PERSPECTIVE_ENABLE 0x8
+# define R200_POINT_SMOOTH 0x20
+# define R200_VTX_STQ0_D3D 0x00010000
+# define R200_VTX_STQ1_D3D 0x00040000
+# define R200_VTX_STQ2_D3D 0x00100000
+# define R200_VTX_STQ3_D3D 0x00400000
+# define R200_VTX_STQ4_D3D 0x01000000
+# define R200_VTX_STQ5_D3D 0x04000000
+#define RADEON_SE_CNTL_STATUS 0x2140
+# define RADEON_VC_NO_SWAP (0 << 0)
+# define RADEON_VC_16BIT_SWAP (1 << 0)
+# define RADEON_VC_32BIT_SWAP (2 << 0)
+# define RADEON_VC_HALF_DWORD_SWAP (3 << 0)
+# define RADEON_TCL_BYPASS (1 << 8)
+#define RADEON_SE_COORD_FMT 0x1c50
+# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0)
+# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1)
+# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8)
+# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9)
+# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10)
+# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11)
+# define RADEON_VTX_W0_NORMALIZE (1 << 12)
+# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16)
+# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26)
+# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26)
+#define RADEON_SE_LINE_WIDTH 0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c
+# define RADEON_LIGHTING_ENABLE (1 << 0)
+# define RADEON_LIGHT_IN_MODELSPACE (1 << 1)
+# define RADEON_LOCAL_VIEWER (1 << 2)
+# define RADEON_NORMALIZE_NORMALS (1 << 3)
+# define RADEON_RESCALE_NORMALS (1 << 4)
+# define RADEON_SPECULAR_LIGHTS (1 << 5)
+# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6)
+# define RADEON_LIGHT_ALPHA (1 << 7)
+# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8)
+# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+# define RADEON_LM_SOURCE_STATE_PREMULT 0
+# define RADEON_LM_SOURCE_STATE_MULT 1
+# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2
+# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3
+# define RADEON_EMISSIVE_SOURCE_SHIFT 16
+# define RADEON_AMBIENT_SOURCE_SHIFT 18
+# define RADEON_DIFFUSE_SOURCE_SHIFT 20
+# define RADEON_SPECULAR_SOURCE_SHIFT 22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c
+# define RADEON_MODELVIEW_0_SHIFT 0
+# define RADEON_MODELVIEW_1_SHIFT 4
+# define RADEON_MODELVIEW_2_SHIFT 8
+# define RADEON_MODELVIEW_3_SHIFT 12
+# define RADEON_IT_MODELVIEW_0_SHIFT 16
+# define RADEON_IT_MODELVIEW_1_SHIFT 20
+# define RADEON_IT_MODELVIEW_2_SHIFT 24
+# define RADEON_IT_MODELVIEW_3_SHIFT 28
+#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260
+# define RADEON_MODELPROJECT_0_SHIFT 0
+# define RADEON_MODELPROJECT_1_SHIFT 4
+# define RADEON_MODELPROJECT_2_SHIFT 8
+# define RADEON_MODELPROJECT_3_SHIFT 12
+# define RADEON_TEXMAT_0_SHIFT 16
+# define RADEON_TEXMAT_1_SHIFT 20
+# define RADEON_TEXMAT_2_SHIFT 24
+# define RADEON_TEXMAT_3_SHIFT 28
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
+# define RADEON_TCL_VTX_W0 (1 << 0)
+# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1)
+# define RADEON_TCL_VTX_FP_ALPHA (1 << 2)
+# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3)
+# define RADEON_TCL_VTX_FP_SPEC (1 << 4)
+# define RADEON_TCL_VTX_FP_FOG (1 << 5)
+# define RADEON_TCL_VTX_PK_SPEC (1 << 6)
+# define RADEON_TCL_VTX_ST0 (1 << 7)
+# define RADEON_TCL_VTX_ST1 (1 << 8)
+# define RADEON_TCL_VTX_Q1 (1 << 9)
+# define RADEON_TCL_VTX_ST2 (1 << 10)
+# define RADEON_TCL_VTX_Q2 (1 << 11)
+# define RADEON_TCL_VTX_ST3 (1 << 12)
+# define RADEON_TCL_VTX_Q3 (1 << 13)
+# define RADEON_TCL_VTX_Q0 (1 << 14)
+# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+# define RADEON_TCL_VTX_NORM0 (1 << 18)
+# define RADEON_TCL_VTX_XY1 (1 << 27)
+# define RADEON_TCL_VTX_Z1 (1 << 28)
+# define RADEON_TCL_VTX_W1 (1 << 29)
+# define RADEON_TCL_VTX_NORM1 (1 << 30)
+# define RADEON_TCL_VTX_Z0 (1 << 31)
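+/*
+ * Illustrative sketch: asking TCL to emit position with W, a packed
+ * diffuse color and one texture-coordinate set.  write_reg() is a
+ * hypothetical helper.
+ *
+ *	write_reg(RADEON_SE_TCL_OUTPUT_VTX_FMT,
+ *		  RADEON_TCL_VTX_W0 |
+ *		  RADEON_TCL_VTX_PK_DIFFUSE |
+ *		  RADEON_TCL_VTX_ST0);
+ */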
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258
+# define RADEON_TCL_COMPUTE_XYZW (1 << 0)
+# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1)
+# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2)
+# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4)
+# define RADEON_TCL_TEX_INPUT_TEX_0 0
+# define RADEON_TCL_TEX_INPUT_TEX_1 1
+# define RADEON_TCL_TEX_INPUT_TEX_2 2
+# define RADEON_TCL_TEX_INPUT_TEX_3 3
+# define RADEON_TCL_TEX_COMPUTED_TEX_0 8
+# define RADEON_TCL_TEX_COMPUTED_TEX_1 9
+# define RADEON_TCL_TEX_COMPUTED_TEX_2 10
+# define RADEON_TCL_TEX_COMPUTED_TEX_3 11
+# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16
+# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20
+# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24
+# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270
+# define RADEON_LIGHT_0_ENABLE (1 << 0)
+# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1)
+# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2)
+# define RADEON_LIGHT_0_IS_LOCAL (1 << 3)
+# define RADEON_LIGHT_0_IS_SPOT (1 << 4)
+# define RADEON_LIGHT_0_DUAL_CONE (1 << 5)
+# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6)
+# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7)
+# define RADEON_LIGHT_0_SHIFT 0
+# define RADEON_LIGHT_1_ENABLE (1 << 16)
+# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17)
+# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18)
+# define RADEON_LIGHT_1_IS_LOCAL (1 << 19)
+# define RADEON_LIGHT_1_IS_SPOT (1 << 20)
+# define RADEON_LIGHT_1_DUAL_CONE (1 << 21)
+# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22)
+# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+# define RADEON_LIGHT_1_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274
+# define RADEON_LIGHT_2_SHIFT 0
+# define RADEON_LIGHT_3_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278
+# define RADEON_LIGHT_4_SHIFT 0
+# define RADEON_LIGHT_5_SHIFT 16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c
+# define RADEON_LIGHT_6_SHIFT 0
+# define RADEON_LIGHT_7_SHIFT 16
+
+#define RADEON_SE_TCL_SHININESS 0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268
+# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0)
+# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1)
+# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2)
+# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3)
+# define RADEON_TEXMAT_0_ENABLE (1 << 4)
+# define RADEON_TEXMAT_1_ENABLE (1 << 5)
+# define RADEON_TEXMAT_2_ENABLE (1 << 6)
+# define RADEON_TEXMAT_3_ENABLE (1 << 7)
+# define RADEON_TEXGEN_INPUT_MASK 0xf
+# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0
+# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1
+# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2
+# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3
+# define RADEON_TEXGEN_INPUT_OBJ 4
+# define RADEON_TEXGEN_INPUT_EYE 5
+# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6
+# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7
+# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+# define RADEON_TEXGEN_0_INPUT_SHIFT 16
+# define RADEON_TEXGEN_1_INPUT_SHIFT 20
+# define RADEON_TEXGEN_2_INPUT_SHIFT 24
+# define RADEON_TEXGEN_3_INPUT_SHIFT 28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264
+# define RADEON_UCP_IN_CLIP_SPACE (1 << 0)
+# define RADEON_UCP_IN_MODEL_SPACE (1 << 1)
+# define RADEON_UCP_ENABLE_0 (1 << 2)
+# define RADEON_UCP_ENABLE_1 (1 << 3)
+# define RADEON_UCP_ENABLE_2 (1 << 4)
+# define RADEON_UCP_ENABLE_3 (1 << 5)
+# define RADEON_UCP_ENABLE_4 (1 << 6)
+# define RADEON_UCP_ENABLE_5 (1 << 7)
+# define RADEON_TCL_FOG_MASK (3 << 8)
+# define RADEON_TCL_FOG_DISABLE (0 << 8)
+# define RADEON_TCL_FOG_EXP (1 << 8)
+# define RADEON_TCL_FOG_EXP2 (2 << 8)
+# define RADEON_TCL_FOG_LINEAR (3 << 8)
+# define RADEON_RNG_BASED_FOG (1 << 10)
+# define RADEON_LIGHT_TWOSIDE (1 << 11)
+# define RADEON_BLEND_OP_COUNT_MASK (7 << 12)
+# define RADEON_BLEND_OP_COUNT_SHIFT 12
+# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16)
+# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17)
+# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18)
+# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19)
+# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20)
+# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21)
+# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22)
+# define RADEON_CULL_FRONT_IS_CW (0 << 28)
+# define RADEON_CULL_FRONT_IS_CCW (1 << 28)
+# define RADEON_CULL_FRONT (1 << 29)
+# define RADEON_CULL_BACK (1 << 30)
+# define RADEON_FORCE_W_TO_ONE (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE 0x1d98
+#define RADEON_SE_VPORT_XOFFSET 0x1d9c
+#define RADEON_SE_VPORT_YSCALE 0x1da0
+#define RADEON_SE_VPORT_YOFFSET 0x1da4
+#define RADEON_SE_VPORT_ZSCALE 0x1da8
+#define RADEON_SE_VPORT_ZOFFSET 0x1dac
+#define RADEON_SE_ZBIAS_FACTOR 0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT 0x1db4
+
+#define RADEON_SE_VTX_FMT 0x2080
+# define RADEON_SE_VTX_FMT_XY 0x00000000
+# define RADEON_SE_VTX_FMT_W0 0x00000001
+# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002
+# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004
+# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008
+# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010
+# define RADEON_SE_VTX_FMT_FPFOG 0x00000020
+# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040
+# define RADEON_SE_VTX_FMT_ST0 0x00000080
+# define RADEON_SE_VTX_FMT_ST1 0x00000100
+# define RADEON_SE_VTX_FMT_Q1 0x00000200
+# define RADEON_SE_VTX_FMT_ST2 0x00000400
+# define RADEON_SE_VTX_FMT_Q2 0x00000800
+# define RADEON_SE_VTX_FMT_ST3 0x00001000
+# define RADEON_SE_VTX_FMT_Q3 0x00002000
+# define RADEON_SE_VTX_FMT_Q0 0x00004000
+# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000
+# define RADEON_SE_VTX_FMT_N0 0x00040000
+# define RADEON_SE_VTX_FMT_XY1 0x08000000
+# define RADEON_SE_VTX_FMT_Z1 0x10000000
+# define RADEON_SE_VTX_FMT_W1 0x20000000
+# define RADEON_SE_VTX_FMT_N1 0x40000000
+# define RADEON_SE_VTX_FMT_Z 0x80000000
+
+#define RADEON_SE_VF_CNTL 0x2084
+# define RADEON_VF_PRIM_TYPE_POINT_LIST 1
+# define RADEON_VF_PRIM_TYPE_LINE_LIST 2
+# define RADEON_VF_PRIM_TYPE_LINE_STRIP 3
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6
+# define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7
+# define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8
+# define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9
+# define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10
+# define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11
+# define RADEON_VF_PRIM_TYPE_LINE_LOOP 12
+# define RADEON_VF_PRIM_TYPE_QUAD_LIST 13
+# define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14
+# define RADEON_VF_PRIM_TYPE_POLYGON 15
+# define RADEON_VF_PRIM_WALK_STATE (0<<4)
+# define RADEON_VF_PRIM_WALK_INDEX (1<<4)
+# define RADEON_VF_PRIM_WALK_LIST (2<<4)
+# define RADEON_VF_PRIM_WALK_DATA (3<<4)
+# define RADEON_VF_COLOR_ORDER_RGBA (1<<6)
+# define RADEON_VF_RADEON_MODE (1<<8)
+# define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9)
+# define RADEON_VF_PROG_STREAM_ENA (1<<10)
+# define RADEON_VF_INDEX_SIZE_SHIFT 11
+# define RADEON_VF_NUM_VERTICES_SHIFT 16
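+/*
+ * Illustrative sketch: kicking an immediate triangle-list draw, with the
+ * vertex count in the field starting at bit 16.  nr_verts and write_reg()
+ * are hypothetical.
+ *
+ *	write_reg(RADEON_SE_VF_CNTL,
+ *		  RADEON_VF_PRIM_TYPE_TRIANGLE_LIST |
+ *		  RADEON_VF_PRIM_WALK_DATA |
+ *		  (nr_verts << RADEON_VF_NUM_VERTICES_SHIFT));
+ */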
+
+#define RADEON_SE_PORT_DATA0 0x2000
+
+#define R200_SE_VAP_CNTL 0x2080
+# define R200_VAP_TCL_ENABLE 0x00000001
+# define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010
+# define R200_VAP_FORCE_W_TO_ONE 0x00010000
+# define R200_VAP_D3D_TEX_DEFAULT 0x00020000
+# define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18
+# define R200_VAP_VF_MAX_VTX_NUM (9 << 18)
+# define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000
+#define R200_VF_MAX_VTX_INDX 0x210c
+#define R200_VF_MIN_VTX_INDX 0x2110
+#define R200_SE_VTE_CNTL 0x20b0
+# define R200_VPORT_X_SCALE_ENA 0x00000001
+# define R200_VPORT_X_OFFSET_ENA 0x00000002
+# define R200_VPORT_Y_SCALE_ENA 0x00000004
+# define R200_VPORT_Y_OFFSET_ENA 0x00000008
+# define R200_VPORT_Z_SCALE_ENA 0x00000010
+# define R200_VPORT_Z_OFFSET_ENA 0x00000020
+# define R200_VTX_XY_FMT 0x00000100
+# define R200_VTX_Z_FMT 0x00000200
+# define R200_VTX_W0_FMT 0x00000400
+# define R200_VTX_W0_NORMALIZE 0x00000800
+# define R200_VTX_ST_DENORMALIZED 0x00001000
+#define R200_SE_VAP_CNTL_STATUS 0x2140
+# define R200_VC_NO_SWAP (0 << 0)
+# define R200_VC_16BIT_SWAP (1 << 0)
+# define R200_VC_32BIT_SWAP (2 << 0)
+#define R200_PP_TXFILTER_0 0x2c00
+#define R200_PP_TXFILTER_1 0x2c20
+#define R200_PP_TXFILTER_2 0x2c40
+#define R200_PP_TXFILTER_3 0x2c60
+#define R200_PP_TXFILTER_4 0x2c80
+#define R200_PP_TXFILTER_5 0x2ca0
+# define R200_MAG_FILTER_NEAREST (0 << 0)
+# define R200_MAG_FILTER_LINEAR (1 << 0)
+# define R200_MAG_FILTER_MASK (1 << 0)
+# define R200_MIN_FILTER_NEAREST (0 << 1)
+# define R200_MIN_FILTER_LINEAR (1 << 1)
+# define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
+# define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
+# define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
+# define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST (8 << 1)
+# define R200_MIN_FILTER_ANISO_LINEAR (9 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
+# define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
+# define R200_MIN_FILTER_MASK (15 << 1)
+# define R200_MAX_ANISO_1_TO_1 (0 << 5)
+# define R200_MAX_ANISO_2_TO_1 (1 << 5)
+# define R200_MAX_ANISO_4_TO_1 (2 << 5)
+# define R200_MAX_ANISO_8_TO_1 (3 << 5)
+# define R200_MAX_ANISO_16_TO_1 (4 << 5)
+# define R200_MAX_ANISO_MASK (7 << 5)
+# define R200_MAX_MIP_LEVEL_MASK (0x0f << 16)
+# define R200_MAX_MIP_LEVEL_SHIFT 16
+# define R200_YUV_TO_RGB (1 << 20)
+# define R200_YUV_TEMPERATURE_COOL (0 << 21)
+# define R200_YUV_TEMPERATURE_HOT (1 << 21)
+# define R200_YUV_TEMPERATURE_MASK (1 << 21)
+# define R200_WRAPEN_S (1 << 22)
+# define R200_CLAMP_S_WRAP (0 << 23)
+# define R200_CLAMP_S_MIRROR (1 << 23)
+# define R200_CLAMP_S_CLAMP_LAST (2 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
+# define R200_CLAMP_S_CLAMP_BORDER (4 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
+# define R200_CLAMP_S_CLAMP_GL (6 << 23)
+# define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
+# define R200_CLAMP_S_MASK (7 << 23)
+# define R200_WRAPEN_T (1 << 26)
+# define R200_CLAMP_T_WRAP (0 << 27)
+# define R200_CLAMP_T_MIRROR (1 << 27)
+# define R200_CLAMP_T_CLAMP_LAST (2 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
+# define R200_CLAMP_T_CLAMP_BORDER (4 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
+# define R200_CLAMP_T_CLAMP_GL (6 << 27)
+# define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
+# define R200_CLAMP_T_MASK (7 << 27)
+# define R200_KILL_LT_ZERO (1 << 30)
+# define R200_BORDER_MODE_OGL (0 << 31)
+# define R200_BORDER_MODE_D3D (1 << 31)
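+/*
+ * Illustrative sketch: a trilinear, GL-clamped sampler state for texture
+ * unit 0 built from the TXFILTER fields above.  write_reg() is a
+ * hypothetical helper.
+ *
+ *	u32 filt = R200_MAG_FILTER_LINEAR
+ *		 | R200_MIN_FILTER_LINEAR_MIP_LINEAR
+ *		 | R200_CLAMP_S_CLAMP_GL
+ *		 | R200_CLAMP_T_CLAMP_GL
+ *		 | R200_BORDER_MODE_OGL;
+ *	write_reg(R200_PP_TXFILTER_0, filt);
+ */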
+#define R200_PP_TXFORMAT_0 0x2c04
+#define R200_PP_TXFORMAT_1 0x2c24
+#define R200_PP_TXFORMAT_2 0x2c44
+#define R200_PP_TXFORMAT_3 0x2c64
+#define R200_PP_TXFORMAT_4 0x2c84
+#define R200_PP_TXFORMAT_5 0x2ca4
+# define R200_TXFORMAT_I8 (0 << 0)
+# define R200_TXFORMAT_AI88 (1 << 0)
+# define R200_TXFORMAT_RGB332 (2 << 0)
+# define R200_TXFORMAT_ARGB1555 (3 << 0)
+# define R200_TXFORMAT_RGB565 (4 << 0)
+# define R200_TXFORMAT_ARGB4444 (5 << 0)
+# define R200_TXFORMAT_ARGB8888 (6 << 0)
+# define R200_TXFORMAT_RGBA8888 (7 << 0)
+# define R200_TXFORMAT_Y8 (8 << 0)
+# define R200_TXFORMAT_AVYU4444 (9 << 0)
+# define R200_TXFORMAT_VYUY422 (10 << 0)
+# define R200_TXFORMAT_YVYU422 (11 << 0)
+# define R200_TXFORMAT_DXT1 (12 << 0)
+# define R200_TXFORMAT_DXT23 (14 << 0)
+# define R200_TXFORMAT_DXT45 (15 << 0)
+# define R200_TXFORMAT_DVDU88 (18 << 0)
+# define R200_TXFORMAT_LDVDU655 (19 << 0)
+# define R200_TXFORMAT_LDVDU8888 (20 << 0)
+# define R200_TXFORMAT_GR1616 (21 << 0)
+# define R200_TXFORMAT_ABGR8888 (22 << 0)
+# define R200_TXFORMAT_BGR111110 (23 << 0)
+# define R200_TXFORMAT_FORMAT_MASK (31 << 0)
+# define R200_TXFORMAT_FORMAT_SHIFT 0
+# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6)
+# define R200_TXFORMAT_NON_POWER2 (1 << 7)
+# define R200_TXFORMAT_WIDTH_MASK (15 << 8)
+# define R200_TXFORMAT_WIDTH_SHIFT 8
+# define R200_TXFORMAT_HEIGHT_MASK (15 << 12)
+# define R200_TXFORMAT_HEIGHT_SHIFT 12
+# define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */
+# define R200_TXFORMAT_F5_WIDTH_SHIFT 16
+# define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
+# define R200_TXFORMAT_F5_HEIGHT_SHIFT 20
+# define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24)
+# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
+# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
+# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
+# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27)
+# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
+# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
+# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
+#define R200_PP_TXFORMAT_X_0 0x2c08
+#define R200_PP_TXFORMAT_X_1 0x2c28
+#define R200_PP_TXFORMAT_X_2 0x2c48
+#define R200_PP_TXFORMAT_X_3 0x2c68
+#define R200_PP_TXFORMAT_X_4 0x2c88
+#define R200_PP_TXFORMAT_X_5 0x2ca8
+
+#define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5 0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */
+
+#define R200_PP_CUBIC_FACES_0 0x2c18
+#define R200_PP_CUBIC_FACES_1 0x2c38
+#define R200_PP_CUBIC_FACES_2 0x2c58
+#define R200_PP_CUBIC_FACES_3 0x2c78
+#define R200_PP_CUBIC_FACES_4 0x2c98
+#define R200_PP_CUBIC_FACES_5 0x2cb8
+
+#define R200_PP_TXOFFSET_0 0x2d00
+# define R200_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define R200_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define R200_TXO_MACRO_LINEAR (0 << 2)
+# define R200_TXO_MACRO_TILE (1 << 2)
+# define R200_TXO_MICRO_LINEAR (0 << 3)
+# define R200_TXO_MICRO_TILE (1 << 3)
+# define R200_TXO_OFFSET_MASK 0xffffffe0
+# define R200_TXO_OFFSET_SHIFT 5
+#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
+
+#define R200_PP_TXOFFSET_1 0x2d18
+#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
+
+#define R200_PP_TXOFFSET_2 0x2d30
+#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
+
+#define R200_PP_TXOFFSET_3 0x2d48
+#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
+#define R200_PP_TXOFFSET_4 0x2d60
+#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
+#define R200_PP_TXOFFSET_5 0x2d78
+#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
+
+#define R200_PP_TFACTOR_0 0x2ee0
+#define R200_PP_TFACTOR_1 0x2ee4
+#define R200_PP_TFACTOR_2 0x2ee8
+#define R200_PP_TFACTOR_3 0x2eec
+#define R200_PP_TFACTOR_4 0x2ef0
+#define R200_PP_TFACTOR_5 0x2ef4
+
+#define R200_PP_TXCBLEND_0 0x2f00
+# define R200_TXC_ARG_A_ZERO (0)
+# define R200_TXC_ARG_A_CURRENT_COLOR (2)
+# define R200_TXC_ARG_A_CURRENT_ALPHA (3)
+# define R200_TXC_ARG_A_DIFFUSE_COLOR (4)
+# define R200_TXC_ARG_A_DIFFUSE_ALPHA (5)
+# define R200_TXC_ARG_A_SPECULAR_COLOR (6)
+# define R200_TXC_ARG_A_SPECULAR_ALPHA (7)
+# define R200_TXC_ARG_A_TFACTOR_COLOR (8)
+# define R200_TXC_ARG_A_TFACTOR_ALPHA (9)
+# define R200_TXC_ARG_A_R0_COLOR (10)
+# define R200_TXC_ARG_A_R0_ALPHA (11)
+# define R200_TXC_ARG_A_R1_COLOR (12)
+# define R200_TXC_ARG_A_R1_ALPHA (13)
+# define R200_TXC_ARG_A_R2_COLOR (14)
+# define R200_TXC_ARG_A_R2_ALPHA (15)
+# define R200_TXC_ARG_A_R3_COLOR (16)
+# define R200_TXC_ARG_A_R3_ALPHA (17)
+# define R200_TXC_ARG_A_R4_COLOR (18)
+# define R200_TXC_ARG_A_R4_ALPHA (19)
+# define R200_TXC_ARG_A_R5_COLOR (20)
+# define R200_TXC_ARG_A_R5_ALPHA (21)
+# define R200_TXC_ARG_A_TFACTOR1_COLOR (26)
+# define R200_TXC_ARG_A_TFACTOR1_ALPHA (27)
+# define R200_TXC_ARG_A_MASK (31 << 0)
+# define R200_TXC_ARG_A_SHIFT 0
+# define R200_TXC_ARG_B_ZERO (0 << 5)
+# define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5)
+# define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5)
+# define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5)
+# define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5)
+# define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5)
+# define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5)
+# define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5)
+# define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5)
+# define R200_TXC_ARG_B_R0_COLOR (10 << 5)
+# define R200_TXC_ARG_B_R0_ALPHA (11 << 5)
+# define R200_TXC_ARG_B_R1_COLOR (12 << 5)
+# define R200_TXC_ARG_B_R1_ALPHA (13 << 5)
+# define R200_TXC_ARG_B_R2_COLOR (14 << 5)
+# define R200_TXC_ARG_B_R2_ALPHA (15 << 5)
+# define R200_TXC_ARG_B_R3_COLOR (16 << 5)
+# define R200_TXC_ARG_B_R3_ALPHA (17 << 5)
+# define R200_TXC_ARG_B_R4_COLOR (18 << 5)
+# define R200_TXC_ARG_B_R4_ALPHA (19 << 5)
+# define R200_TXC_ARG_B_R5_COLOR (20 << 5)
+# define R200_TXC_ARG_B_R5_ALPHA (21 << 5)
+# define R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5)
+# define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5)
+# define R200_TXC_ARG_B_MASK (31 << 5)
+# define R200_TXC_ARG_B_SHIFT 5
+# define R200_TXC_ARG_C_ZERO (0 << 10)
+# define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10)
+# define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10)
+# define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10)
+# define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10)
+# define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10)
+# define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10)
+# define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10)
+# define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10)
+# define R200_TXC_ARG_C_R0_COLOR (10 << 10)
+# define R200_TXC_ARG_C_R0_ALPHA (11 << 10)
+# define R200_TXC_ARG_C_R1_COLOR (12 << 10)
+# define R200_TXC_ARG_C_R1_ALPHA (13 << 10)
+# define R200_TXC_ARG_C_R2_COLOR (14 << 10)
+# define R200_TXC_ARG_C_R2_ALPHA (15 << 10)
+# define R200_TXC_ARG_C_R3_COLOR (16 << 10)
+# define R200_TXC_ARG_C_R3_ALPHA (17 << 10)
+# define R200_TXC_ARG_C_R4_COLOR (18 << 10)
+# define R200_TXC_ARG_C_R4_ALPHA (19 << 10)
+# define R200_TXC_ARG_C_R5_COLOR (20 << 10)
+# define R200_TXC_ARG_C_R5_ALPHA (21 << 10)
+# define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10)
+# define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10)
+# define R200_TXC_ARG_C_MASK (31 << 10)
+# define R200_TXC_ARG_C_SHIFT 10
+# define R200_TXC_COMP_ARG_A (1 << 16)
+# define R200_TXC_COMP_ARG_A_SHIFT (16)
+# define R200_TXC_BIAS_ARG_A (1 << 17)
+# define R200_TXC_SCALE_ARG_A (1 << 18)
+# define R200_TXC_NEG_ARG_A (1 << 19)
+# define R200_TXC_COMP_ARG_B (1 << 20)
+# define R200_TXC_COMP_ARG_B_SHIFT (20)
+# define R200_TXC_BIAS_ARG_B (1 << 21)
+# define R200_TXC_SCALE_ARG_B (1 << 22)
+# define R200_TXC_NEG_ARG_B (1 << 23)
+# define R200_TXC_COMP_ARG_C (1 << 24)
+# define R200_TXC_COMP_ARG_C_SHIFT (24)
+# define R200_TXC_BIAS_ARG_C (1 << 25)
+# define R200_TXC_SCALE_ARG_C (1 << 26)
+# define R200_TXC_NEG_ARG_C (1 << 27)
+# define R200_TXC_OP_MADD (0 << 28)
+# define R200_TXC_OP_CND0 (2 << 28)
+# define R200_TXC_OP_LERP (3 << 28)
+# define R200_TXC_OP_DOT3 (4 << 28)
+# define R200_TXC_OP_DOT4 (5 << 28)
+# define R200_TXC_OP_CONDITIONAL (6 << 28)
+# define R200_TXC_OP_DOT2_ADD (7 << 28)
+# define R200_TXC_OP_MASK (7 << 28)
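+/*
+ * Illustrative sketch: a modulate-style color stage.  Assuming the usual
+ * reading of TXC_OP_MADD as arg_A * arg_B + arg_C, this multiplies a
+ * texel previously routed to R0 by the diffuse color.  write_reg() is a
+ * hypothetical helper.
+ *
+ *	write_reg(R200_PP_TXCBLEND_0,
+ *		  R200_TXC_ARG_A_R0_COLOR |
+ *		  R200_TXC_ARG_B_DIFFUSE_COLOR |
+ *		  R200_TXC_ARG_C_ZERO |
+ *		  R200_TXC_OP_MADD);
+ */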
+#define R200_PP_TXCBLEND2_0 0x2f04
+# define R200_TXC_TFACTOR_SEL_SHIFT 0
+# define R200_TXC_TFACTOR_SEL_MASK 0x7
+# define R200_TXC_TFACTOR1_SEL_SHIFT 4
+# define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4)
+# define R200_TXC_SCALE_SHIFT 8
+# define R200_TXC_SCALE_MASK (7 << 8)
+# define R200_TXC_SCALE_1X (0 << 8)
+# define R200_TXC_SCALE_2X (1 << 8)
+# define R200_TXC_SCALE_4X (2 << 8)
+# define R200_TXC_SCALE_8X (3 << 8)
+# define R200_TXC_SCALE_INV2 (5 << 8)
+# define R200_TXC_SCALE_INV4 (6 << 8)
+# define R200_TXC_SCALE_INV8 (7 << 8)
+# define R200_TXC_CLAMP_SHIFT 12
+# define R200_TXC_CLAMP_MASK (3 << 12)
+# define R200_TXC_CLAMP_WRAP (0 << 12)
+# define R200_TXC_CLAMP_0_1 (1 << 12)
+# define R200_TXC_CLAMP_8_8 (2 << 12)
+# define R200_TXC_OUTPUT_REG_MASK (7 << 16)
+# define R200_TXC_OUTPUT_REG_NONE (0 << 16)
+# define R200_TXC_OUTPUT_REG_R0 (1 << 16)
+# define R200_TXC_OUTPUT_REG_R1 (2 << 16)
+# define R200_TXC_OUTPUT_REG_R2 (3 << 16)
+# define R200_TXC_OUTPUT_REG_R3 (4 << 16)
+# define R200_TXC_OUTPUT_REG_R4 (5 << 16)
+# define R200_TXC_OUTPUT_REG_R5 (6 << 16)
+# define R200_TXC_OUTPUT_MASK_MASK (7 << 20)
+# define R200_TXC_OUTPUT_MASK_RGB (0 << 20)
+# define R200_TXC_OUTPUT_MASK_RG (1 << 20)
+# define R200_TXC_OUTPUT_MASK_RB (2 << 20)
+# define R200_TXC_OUTPUT_MASK_R (3 << 20)
+# define R200_TXC_OUTPUT_MASK_GB (4 << 20)
+# define R200_TXC_OUTPUT_MASK_G (5 << 20)
+# define R200_TXC_OUTPUT_MASK_B (6 << 20)
+# define R200_TXC_OUTPUT_MASK_NONE (7 << 20)
+# define R200_TXC_REPL_NORMAL 0
+# define R200_TXC_REPL_RED 1
+# define R200_TXC_REPL_GREEN 2
+# define R200_TXC_REPL_BLUE 3
+# define R200_TXC_REPL_ARG_A_SHIFT 26
+# define R200_TXC_REPL_ARG_A_MASK (3 << 26)
+# define R200_TXC_REPL_ARG_B_SHIFT 28
+# define R200_TXC_REPL_ARG_B_MASK (3 << 28)
+# define R200_TXC_REPL_ARG_C_SHIFT 30
+# define R200_TXC_REPL_ARG_C_MASK (3 << 30)
+#define R200_PP_TXABLEND_0 0x2f08
+# define R200_TXA_ARG_A_ZERO (0)
+# define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */
+# define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */
+# define R200_TXA_ARG_A_DIFFUSE_ALPHA (4)
+# define R200_TXA_ARG_A_DIFFUSE_BLUE (5)
+# define R200_TXA_ARG_A_SPECULAR_ALPHA (6)
+# define R200_TXA_ARG_A_SPECULAR_BLUE (7)
+# define R200_TXA_ARG_A_TFACTOR_ALPHA (8)
+# define R200_TXA_ARG_A_TFACTOR_BLUE (9)
+# define R200_TXA_ARG_A_R0_ALPHA (10)
+# define R200_TXA_ARG_A_R0_BLUE (11)
+# define R200_TXA_ARG_A_R1_ALPHA (12)
+# define R200_TXA_ARG_A_R1_BLUE (13)
+# define R200_TXA_ARG_A_R2_ALPHA (14)
+# define R200_TXA_ARG_A_R2_BLUE (15)
+# define R200_TXA_ARG_A_R3_ALPHA (16)
+# define R200_TXA_ARG_A_R3_BLUE (17)
+# define R200_TXA_ARG_A_R4_ALPHA (18)
+# define R200_TXA_ARG_A_R4_BLUE (19)
+# define R200_TXA_ARG_A_R5_ALPHA (20)
+# define R200_TXA_ARG_A_R5_BLUE (21)
+# define R200_TXA_ARG_A_TFACTOR1_ALPHA (26)
+# define R200_TXA_ARG_A_TFACTOR1_BLUE (27)
+# define R200_TXA_ARG_A_MASK (31 << 0)
+# define R200_TXA_ARG_A_SHIFT 0
+# define R200_TXA_ARG_B_ZERO (0 << 5)
+# define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */
+# define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */
+# define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5)
+# define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5)
+# define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5)
+# define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5)
+# define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5)
+# define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5)
+# define R200_TXA_ARG_B_R0_ALPHA (10 << 5)
+# define R200_TXA_ARG_B_R0_BLUE (11 << 5)
+# define R200_TXA_ARG_B_R1_ALPHA (12 << 5)
+# define R200_TXA_ARG_B_R1_BLUE (13 << 5)
+# define R200_TXA_ARG_B_R2_ALPHA (14 << 5)
+# define R200_TXA_ARG_B_R2_BLUE (15 << 5)
+# define R200_TXA_ARG_B_R3_ALPHA (16 << 5)
+# define R200_TXA_ARG_B_R3_BLUE (17 << 5)
+# define R200_TXA_ARG_B_R4_ALPHA (18 << 5)
+# define R200_TXA_ARG_B_R4_BLUE (19 << 5)
+# define R200_TXA_ARG_B_R5_ALPHA (20 << 5)
+# define R200_TXA_ARG_B_R5_BLUE (21 << 5)
+# define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5)
+# define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5)
+# define R200_TXA_ARG_B_MASK (31 << 5)
+# define R200_TXA_ARG_B_SHIFT 5
+# define R200_TXA_ARG_C_ZERO (0 << 10)
+# define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */
+# define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */
+# define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10)
+# define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10)
+# define R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10)
+# define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10)
+# define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10)
+# define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10)
+# define R200_TXA_ARG_C_R0_ALPHA (10 << 10)
+# define R200_TXA_ARG_C_R0_BLUE (11 << 10)
+# define R200_TXA_ARG_C_R1_ALPHA (12 << 10)
+# define R200_TXA_ARG_C_R1_BLUE (13 << 10)
+# define R200_TXA_ARG_C_R2_ALPHA (14 << 10)
+# define R200_TXA_ARG_C_R2_BLUE (15 << 10)
+# define R200_TXA_ARG_C_R3_ALPHA (16 << 10)
+# define R200_TXA_ARG_C_R3_BLUE (17 << 10)
+# define R200_TXA_ARG_C_R4_ALPHA (18 << 10)
+# define R200_TXA_ARG_C_R4_BLUE (19 << 10)
+# define R200_TXA_ARG_C_R5_ALPHA (20 << 10)
+# define R200_TXA_ARG_C_R5_BLUE (21 << 10)
+# define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10)
+# define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10)
+# define R200_TXA_ARG_C_MASK (31 << 10)
+# define R200_TXA_ARG_C_SHIFT 10
+# define R200_TXA_COMP_ARG_A (1 << 16)
+# define R200_TXA_COMP_ARG_A_SHIFT (16)
+# define R200_TXA_BIAS_ARG_A (1 << 17)
+# define R200_TXA_SCALE_ARG_A (1 << 18)
+# define R200_TXA_NEG_ARG_A (1 << 19)
+# define R200_TXA_COMP_ARG_B (1 << 20)
+# define R200_TXA_COMP_ARG_B_SHIFT (20)
+# define R200_TXA_BIAS_ARG_B (1 << 21)
+# define R200_TXA_SCALE_ARG_B (1 << 22)
+# define R200_TXA_NEG_ARG_B (1 << 23)
+# define R200_TXA_COMP_ARG_C (1 << 24)
+# define R200_TXA_COMP_ARG_C_SHIFT (24)
+# define R200_TXA_BIAS_ARG_C (1 << 25)
+# define R200_TXA_SCALE_ARG_C (1 << 26)
+# define R200_TXA_NEG_ARG_C (1 << 27)
+# define R200_TXA_OP_MADD (0 << 28)
+# define R200_TXA_OP_CND0 (2 << 28)
+# define R200_TXA_OP_LERP (3 << 28)
+# define R200_TXA_OP_CONDITIONAL (6 << 28)
+# define R200_TXA_OP_MASK (7 << 28)
+#define R200_PP_TXABLEND2_0 0x2f0c
+# define R200_TXA_TFACTOR_SEL_SHIFT 0
+# define R200_TXA_TFACTOR_SEL_MASK 0x7
+# define R200_TXA_TFACTOR1_SEL_SHIFT 4
+# define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4)
+# define R200_TXA_SCALE_SHIFT 8
+# define R200_TXA_SCALE_MASK (7 << 8)
+# define R200_TXA_SCALE_1X (0 << 8)
+# define R200_TXA_SCALE_2X (1 << 8)
+# define R200_TXA_SCALE_4X (2 << 8)
+# define R200_TXA_SCALE_8X (3 << 8)
+# define R200_TXA_SCALE_INV2 (5 << 8)
+# define R200_TXA_SCALE_INV4 (6 << 8)
+# define R200_TXA_SCALE_INV8 (7 << 8)
+# define R200_TXA_CLAMP_SHIFT 12
+# define R200_TXA_CLAMP_MASK (3 << 12)
+# define R200_TXA_CLAMP_WRAP (0 << 12)
+# define R200_TXA_CLAMP_0_1 (1 << 12)
+# define R200_TXA_CLAMP_8_8 (2 << 12)
+# define R200_TXA_OUTPUT_REG_MASK (7 << 16)
+# define R200_TXA_OUTPUT_REG_NONE (0 << 16)
+# define R200_TXA_OUTPUT_REG_R0 (1 << 16)
+# define R200_TXA_OUTPUT_REG_R1 (2 << 16)
+# define R200_TXA_OUTPUT_REG_R2 (3 << 16)
+# define R200_TXA_OUTPUT_REG_R3 (4 << 16)
+# define R200_TXA_OUTPUT_REG_R4 (5 << 16)
+# define R200_TXA_OUTPUT_REG_R5 (6 << 16)
+# define R200_TXA_DOT_ALPHA (1 << 20)
+# define R200_TXA_REPL_NORMAL 0
+# define R200_TXA_REPL_RED 1
+# define R200_TXA_REPL_GREEN 2
+# define R200_TXA_REPL_ARG_A_SHIFT 26
+# define R200_TXA_REPL_ARG_A_MASK (3 << 26)
+# define R200_TXA_REPL_ARG_B_SHIFT 28
+# define R200_TXA_REPL_ARG_B_MASK (3 << 28)
+# define R200_TXA_REPL_ARG_C_SHIFT 30
+# define R200_TXA_REPL_ARG_C_MASK (3 << 30)
+
+#define R200_SE_VTX_FMT_0 0x2088
+# define R200_VTX_XY 0 /* always have xy */
+# define R200_VTX_Z0 (1<<0)
+# define R200_VTX_W0 (1<<1)
+# define R200_VTX_WEIGHT_COUNT_SHIFT (2)
+# define R200_VTX_PV_MATRIX_SEL (1<<5)
+# define R200_VTX_N0 (1<<6)
+# define R200_VTX_POINT_SIZE (1<<7)
+# define R200_VTX_DISCRETE_FOG (1<<8)
+# define R200_VTX_SHININESS_0 (1<<9)
+# define R200_VTX_SHININESS_1 (1<<10)
+# define R200_VTX_COLOR_NOT_PRESENT 0
+# define R200_VTX_PK_RGBA 1
+# define R200_VTX_FP_RGB 2
+# define R200_VTX_FP_RGBA 3
+# define R200_VTX_COLOR_MASK 3
+# define R200_VTX_COLOR_0_SHIFT 11
+# define R200_VTX_COLOR_1_SHIFT 13
+# define R200_VTX_COLOR_2_SHIFT 15
+# define R200_VTX_COLOR_3_SHIFT 17
+# define R200_VTX_COLOR_4_SHIFT 19
+# define R200_VTX_COLOR_5_SHIFT 21
+# define R200_VTX_COLOR_6_SHIFT 23
+# define R200_VTX_COLOR_7_SHIFT 25
+# define R200_VTX_XY1 (1<<28)
+# define R200_VTX_Z1 (1<<29)
+# define R200_VTX_W1 (1<<30)
+# define R200_VTX_N1 (1<<31)
+#define R200_SE_VTX_FMT_1 0x208c
+# define R200_VTX_TEX0_COMP_CNT_SHIFT 0
+# define R200_VTX_TEX1_COMP_CNT_SHIFT 3
+# define R200_VTX_TEX2_COMP_CNT_SHIFT 6
+# define R200_VTX_TEX3_COMP_CNT_SHIFT 9
+# define R200_VTX_TEX4_COMP_CNT_SHIFT 12
+# define R200_VTX_TEX5_COMP_CNT_SHIFT 15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
+# define R200_OUTPUT_XYZW (1<<0)
+# define R200_OUTPUT_COLOR_0 (1<<8)
+# define R200_OUTPUT_COLOR_1 (1<<9)
+# define R200_OUTPUT_TEX_0 (1<<16)
+# define R200_OUTPUT_TEX_1 (1<<17)
+# define R200_OUTPUT_TEX_2 (1<<18)
+# define R200_OUTPUT_TEX_3 (1<<19)
+# define R200_OUTPUT_TEX_4 (1<<20)
+# define R200_OUTPUT_TEX_5 (1<<21)
+# define R200_OUTPUT_TEX_MASK (0x3f<<16)
+# define R200_OUTPUT_DISCRETE_FOG (1<<24)
+# define R200_OUTPUT_PT_SIZE (1<<25)
+# define R200_FORCE_INORDER_PROC (1<<31)
+#define R200_PP_CNTL_X 0x2cc4
+#define R200_PP_TXMULTI_CTL_0 0x2c1c
+#define R200_PP_TXMULTI_CTL_1 0x2c3c
+#define R200_PP_TXMULTI_CTL_2 0x2c5c
+#define R200_PP_TXMULTI_CTL_3 0x2c7c
+#define R200_PP_TXMULTI_CTL_4 0x2c9c
+#define R200_PP_TXMULTI_CTL_5 0x2cbc
+#define R200_SE_VTX_STATE_CNTL 0x2180
+# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+ /* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR 0x07d4
+#define RADEON_CP_ME_RAM_RADDR 0x07d8
+#define RADEON_CP_ME_RAM_DATAH 0x07dc
+#define RADEON_CP_ME_RAM_DATAL 0x07e0
+
+#define RADEON_CP_RB_BASE 0x0700
+#define RADEON_CP_RB_CNTL 0x0704
+# define RADEON_RB_BUFSZ_SHIFT 0
+# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
+# define RADEON_RB_BLKSZ_SHIFT 8
+# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
+# define RADEON_BUF_SWAP_32BIT (2 << 16)
+# define RADEON_MAX_FETCH_SHIFT 18
+# define RADEON_MAX_FETCH_MASK (0x3 << 18)
+# define RADEON_RB_NO_UPDATE (1 << 27)
+# define RADEON_RB_RPTR_WR_ENA (1 << 31)
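+/*
+ * Illustrative sketch: RB_BUFSZ is log2-encoded (assumed here to be log2
+ * of the ring size in 8-byte units) and RB_BLKSZ picks the read-pointer
+ * update granularity; the values below are examples only.  write_reg()
+ * is a hypothetical helper.
+ *
+ *	u32 cntl = (order_base_2(ring_bytes / 8) << RADEON_RB_BUFSZ_SHIFT)
+ *		 | (12 << RADEON_RB_BLKSZ_SHIFT);
+ *	write_reg(RADEON_CP_RB_CNTL, cntl);
+ */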
+#define RADEON_CP_RB_RPTR_ADDR 0x070c
+#define RADEON_CP_RB_RPTR 0x0710
+#define RADEON_CP_RB_WPTR 0x0714
+#define RADEON_CP_RB_RPTR_WR 0x071c
+
+#define RADEON_SCRATCH_UMSK 0x0770
+#define RADEON_SCRATCH_ADDR 0x0774
+
+#define R600_CP_RB_BASE 0xc100
+#define R600_CP_RB_CNTL 0xc104
+# define R600_RB_BUFSZ(x) ((x) << 0)
+# define R600_RB_BLKSZ(x) ((x) << 8)
+# define R600_RB_NO_UPDATE (1 << 27)
+# define R600_RB_RPTR_WR_ENA (1 << 31)
+#define R600_CP_RB_RPTR_WR 0xc108
+#define R600_CP_RB_RPTR_ADDR 0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI 0xc110
+#define R600_CP_RB_WPTR 0xc114
+#define R600_CP_RB_WPTR_ADDR 0xc118
+#define R600_CP_RB_WPTR_ADDR_HI 0xc11c
+#define R600_CP_RB_RPTR 0x8700
+#define R600_CP_RB_WPTR_DELAY 0x8704
+
+#define RADEON_CP_IB_BASE 0x0738
+#define RADEON_CP_IB_BUFSZ 0x073c
+
+#define RADEON_CP_CSQ_CNTL 0x0740
+# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
+# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
+# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
+# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
+# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
+# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
+
+#define R300_CP_RESYNC_ADDR 0x778
+#define R300_CP_RESYNC_DATA 0x77c
+
+#define RADEON_CP_CSQ_STAT 0x07f8
+# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
+# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
+# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16)
+# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24)
+#define RADEON_CP_CSQ2_STAT 0x07fc
+#define RADEON_CP_CSQ_ADDR 0x07f0
+#define RADEON_CP_CSQ_DATA 0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY 0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT 0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY 0x0718
+# define RADEON_PRE_WRITE_TIMER_SHIFT 0
+# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
+#define RADEON_CP_CSQ_MODE 0x0744
+# define RADEON_INDIRECT2_START_SHIFT 0
+# define RADEON_INDIRECT2_START_MASK (0x7f << 0)
+# define RADEON_INDIRECT1_START_SHIFT 8
+# define RADEON_INDIRECT1_START_MASK (0x7f << 8)
+
+#define RADEON_AIC_CNTL 0x01d0
+# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
+# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+# define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */
+#define RADEON_AIC_LO_ADDR 0x01dc
+#define RADEON_AIC_PT_BASE 0x01d8
+#define RADEON_AIC_HI_ADDR 0x01e0
+
+
+
+ /* Constants */
+/* #define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG             RADEON_GUI_SCRATCH_REG2 */
+
+
+
+ /* CP packet types */
+#define RADEON_CP_PACKET0 0x00000000
+#define RADEON_CP_PACKET1 0x40000000
+#define RADEON_CP_PACKET2 0x80000000
+#define RADEON_CP_PACKET3 0xC0000000
+# define RADEON_CP_PACKET_MASK 0xC0000000
+# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
+# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
+# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
+# define R300_CP_PACKET0_REG_MASK 0x00001fff
+# define R600_CP_PACKET0_REG_MASK 0x0000ffff
+# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
+# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000
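+/*
+ * Illustrative sketch: a Type-0 packet header addresses a register by
+ * dword index and carries (dword count - 1) starting at bit 16.  The
+ * macro below mirrors the usual CP_PACKET0 convention; it is an example,
+ * not a definition this header provides.
+ *
+ *	#define EXAMPLE_CP_PACKET0(reg, n)	\
+ *		(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+ */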
+
+#define RADEON_CP_PACKET3_NOP 0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2      0xC0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY 0x00000000
+#define RADEON_CP_VC_FRMT_W0 0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG 0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040
+#define RADEON_CP_VC_FRMT_ST0 0x00000080
+#define RADEON_CP_VC_FRMT_ST1 0x00000100
+#define RADEON_CP_VC_FRMT_Q1 0x00000200
+#define RADEON_CP_VC_FRMT_ST2 0x00000400
+#define RADEON_CP_VC_FRMT_Q2 0x00000800
+#define RADEON_CP_VC_FRMT_ST3 0x00001000
+#define RADEON_CP_VC_FRMT_Q3 0x00002000
+#define RADEON_CP_VC_FRMT_Q0 0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000
+#define RADEON_CP_VC_FRMT_N0 0x00040000
+#define RADEON_CP_VC_FRMT_XY1 0x08000000
+#define RADEON_CP_VC_FRMT_Z1 0x10000000
+#define RADEON_CP_VC_FRMT_W1 0x20000000
+#define RADEON_CP_VC_FRMT_N1 0x40000000
+#define RADEON_CP_VC_FRMT_Z 0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
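+/*
+ * Illustrative sketch: the draw-control dword for an indexed triangle
+ * list pushed through TCL.  nr_verts is hypothetical.
+ *
+ *	u32 vc = RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
+ *	       | RADEON_CP_VC_CNTL_PRIM_WALK_IND
+ *	       | RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA
+ *	       | RADEON_CP_VC_CNTL_TCL_ENABLE
+ *	       | (nr_verts << RADEON_CP_VC_CNTL_NUM_SHIFT);
+ */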
+
+#define RADEON_VS_MATRIX_0_ADDR 0
+#define RADEON_VS_MATRIX_1_ADDR 4
+#define RADEON_VS_MATRIX_2_ADDR 8
+#define RADEON_VS_MATRIX_3_ADDR 12
+#define RADEON_VS_MATRIX_4_ADDR 16
+#define RADEON_VS_MATRIX_5_ADDR 20
+#define RADEON_VS_MATRIX_6_ADDR 24
+#define RADEON_VS_MATRIX_7_ADDR 28
+#define RADEON_VS_MATRIX_8_ADDR 32
+#define RADEON_VS_MATRIX_9_ADDR 36
+#define RADEON_VS_MATRIX_10_ADDR 40
+#define RADEON_VS_MATRIX_11_ADDR 44
+#define RADEON_VS_MATRIX_12_ADDR 48
+#define RADEON_VS_MATRIX_13_ADDR 52
+#define RADEON_VS_MATRIX_14_ADDR 56
+#define RADEON_VS_MATRIX_15_ADDR 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR 64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR 80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR 88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112
+#define RADEON_VS_UCP_ADDR 116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122
+#define RADEON_VS_FOG_PARAM_ADDR 123
+#define RADEON_VS_EYE_VECTOR_ADDR 124
+
+#define RADEON_SS_LIGHT_DCD_ADDR 0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51
+#define RADEON_SS_SHININESS 60
+
+#define RADEON_TV_MASTER_CNTL 0x0800
+# define RADEON_TV_ASYNC_RST (1 << 0)
+# define RADEON_CRT_ASYNC_RST (1 << 1)
+# define RADEON_RESTART_PHASE_FIX (1 << 3)
+# define RADEON_TV_FIFO_ASYNC_RST (1 << 4)
+# define RADEON_VIN_ASYNC_RST (1 << 5)
+# define RADEON_AUD_ASYNC_RST (1 << 6)
+# define RADEON_DVS_ASYNC_RST (1 << 7)
+# define RADEON_CRT_FIFO_CE_EN (1 << 9)
+# define RADEON_TV_FIFO_CE_EN (1 << 10)
+# define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14)
+# define RADEON_TVCLK_ALWAYS_ONb (1 << 30)
+# define RADEON_TV_ON (1 << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888
+# define RADEON_Y_RED_EN (1 << 0)
+# define RADEON_C_GRN_EN (1 << 1)
+# define RADEON_CMP_BLU_EN (1 << 2)
+# define RADEON_DAC_DITHER_EN (1 << 3)
+# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4)
+# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8)
+# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12)
+# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16
+#define RADEON_TV_RGB_CNTL 0x0804
+# define RADEON_SWITCH_TO_BLUE (1 << 4)
+# define RADEON_RGB_DITHER_EN (1 << 5)
+# define RADEON_RGB_SRC_SEL_MASK (3 << 8)
+# define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8)
+# define RADEON_RGB_SRC_SEL_RMX (1 << 8)
+# define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8)
+# define RADEON_RGB_CONVERT_BY_PASS (1 << 10)
+# define RADEON_UVRAM_READ_MARGIN_SHIFT 16
+# define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20
+# define RADEON_RGB_ATTEN_SEL(x) ((x) << 24)
+# define RADEON_TVOUT_SCALE_EN (1 << 26)
+# define RADEON_RGB_ATTEN_VAL(x) ((x) << 28)
+#define RADEON_TV_SYNC_CNTL 0x0808
+# define RADEON_SYNC_OE (1 << 0)
+# define RADEON_SYNC_OUT (1 << 1)
+# define RADEON_SYNC_IN (1 << 2)
+# define RADEON_SYNC_PUB (1 << 3)
+# define RADEON_SYNC_PD (1 << 4)
+# define RADEON_TV_SYNC_IO_DRIVE (1 << 5)
+#define RADEON_TV_HTOTAL 0x080c
+#define RADEON_TV_HDISP 0x0810
+#define RADEON_TV_HSTART 0x0818
+#define RADEON_TV_HCOUNT                    0x081c
+#define RADEON_TV_VTOTAL 0x0820
+#define RADEON_TV_VDISP 0x0824
+#define RADEON_TV_VCOUNT 0x0828
+#define RADEON_TV_FTOTAL 0x082c
+#define RADEON_TV_FCOUNT 0x0830
+#define RADEON_TV_FRESTART 0x0834
+#define RADEON_TV_HRESTART 0x0838
+#define RADEON_TV_VRESTART 0x083c
+#define RADEON_TV_HOST_READ_DATA 0x0840
+#define RADEON_TV_HOST_WRITE_DATA 0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL 0x0848
+# define RADEON_HOST_FIFO_RD (1 << 12)
+# define RADEON_HOST_FIFO_RD_ACK (1 << 13)
+# define RADEON_HOST_FIFO_WT (1 << 14)
+# define RADEON_HOST_FIFO_WT_ACK (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1 0x084c
+# define RADEON_UV_INC_MASK 0xffff
+# define RADEON_UV_INC_SHIFT 0
+# define RADEON_Y_W_EN (1 << 24)
+# define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */
+# define RADEON_Y_DEL_W_SIG_SHIFT 26
+#define RADEON_TV_TIMING_CNTL 0x0850
+# define RADEON_H_INC_MASK 0xfff
+# define RADEON_H_INC_SHIFT 0
+# define RADEON_REQ_Y_FIRST (1 << 19)
+# define RADEON_FORCE_BURST_ALWAYS (1 << 21)
+# define RADEON_UV_POST_SCALE_BYPASS (1 << 23)
+# define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2 0x0854
+# define RADEON_DITHER_MODE (1 << 0)
+# define RADEON_Y_OUTPUT_DITHER_EN (1 << 1)
+# define RADEON_UV_OUTPUT_DITHER_EN (1 << 2)
+# define RADEON_UV_TO_BUF_DITHER_EN (1 << 3)
+#define RADEON_TV_Y_FALL_CNTL 0x0858
+# define RADEON_Y_FALL_PING_PONG (1 << 16)
+# define RADEON_Y_COEF_EN (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL 0x085c
+# define RADEON_Y_RISE_PING_PONG (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864
+# define RADEON_YUPSAMP_EN (1 << 0)
+# define RADEON_UVUPSAMP_EN (1 << 2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868
+# define RADEON_Y_GAIN_LIMIT_SHIFT 0
+# define RADEON_UV_GAIN_LIMIT_SHIFT 16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c
+# define RADEON_Y_GAIN_SHIFT 0
+# define RADEON_UV_GAIN_SHIFT 16
+#define RADEON_TV_MODULATOR_CNTL1 0x0870
+# define RADEON_YFLT_EN (1 << 2)
+# define RADEON_UVFLT_EN (1 << 3)
+# define RADEON_ALT_PHASE_EN (1 << 6)
+# define RADEON_SYNC_TIP_LEVEL (1 << 7)
+# define RADEON_BLANK_LEVEL_SHIFT 8
+# define RADEON_SET_UP_LEVEL_SHIFT 16
+# define RADEON_SLEW_RATE_LIMIT (1 << 23)
+# define RADEON_CY_FILT_BLEND_SHIFT 28
+#define RADEON_TV_MODULATOR_CNTL2 0x0874
+# define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff
+# define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff
+# define RADEON_TV_V_BURST_LEVEL_SHIFT 16
+#define RADEON_TV_CRC_CNTL 0x0890
+#define RADEON_TV_UV_ADR 0x08ac
+# define RADEON_MAX_UV_ADR_MASK 0x000000ff
+# define RADEON_MAX_UV_ADR_SHIFT 0
+# define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00
+# define RADEON_TABLE1_BOT_ADR_SHIFT 8
+# define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000
+# define RADEON_TABLE3_TOP_ADR_SHIFT 16
+# define RADEON_HCODE_TABLE_SEL_MASK 0x06000000
+# define RADEON_HCODE_TABLE_SEL_SHIFT 25
+# define RADEON_VCODE_TABLE_SEL_MASK 0x18000000
+# define RADEON_VCODE_TABLE_SEL_SHIFT 27
+# define RADEON_TV_MAX_FIFO_ADDR 0x1a7
+# define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff
+#define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL */
+#define RADEON_TV_PLL_CNTL 0x0021 /* PLL */
+# define RADEON_TV_M0LO_MASK 0xff
+# define RADEON_TV_M0HI_MASK 0x7
+# define RADEON_TV_M0HI_SHIFT 18
+# define RADEON_TV_N0LO_MASK 0x1ff
+# define RADEON_TV_N0LO_SHIFT 8
+# define RADEON_TV_N0HI_MASK 0x3
+# define RADEON_TV_N0HI_SHIFT 21
+# define RADEON_TV_P_MASK 0xf
+# define RADEON_TV_P_SHIFT 24
+# define RADEON_TV_SLIP_EN (1 << 23)
+# define RADEON_TV_DTO_EN (1 << 28)
+#define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */
+# define RADEON_TVPLL_RESET (1 << 1)
+# define RADEON_TVPLL_SLEEP (1 << 3)
+# define RADEON_TVPLL_REFCLK_SEL (1 << 4)
+# define RADEON_TVPCP_SHIFT 8
+# define RADEON_TVPCP_MASK (7 << 8)
+# define RADEON_TVPVG_SHIFT 11
+# define RADEON_TVPVG_MASK (7 << 11)
+# define RADEON_TVPDC_SHIFT 14
+# define RADEON_TVPDC_MASK (3 << 14)
+# define RADEON_TVPLL_TEST_DIS (1 << 31)
+# define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1 0xe30
+# define RS400_DISP2_START_REQ_LEVEL_SHIFT 0
+# define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12
+# define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22
+# define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff
+#define RS400_DISP2_REQ_CNTL2 0xe34
+# define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12
+# define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff
+# define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22
+# define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff
+#define RS400_DMIF_MEM_CNTL1 0xe38
+# define RS400_DISP2_START_ADR_SHIFT 0
+# define RS400_DISP2_START_ADR_MASK 0x3ff
+# define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12
+# define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff
+# define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22
+# define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff
+#define RS400_DISP1_REQ_CNTL1 0xe3c
+# define RS400_DISP1_START_REQ_LEVEL_SHIFT 0
+# define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12
+# define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff
+# define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22
+# define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff
+
+#define RADEON_PCIE_INDEX 0x0030
+#define RADEON_PCIE_DATA 0x0034
+#define RADEON_PCIE_TX_GART_CNTL 0x10
+# define RADEON_PCIE_TX_GART_EN (1 << 0)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
+# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
+# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
+# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
+# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
+# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_START_LO 0x14
+#define RADEON_PCIE_TX_GART_START_HI 0x15
+#define RADEON_PCIE_TX_GART_END_LO 0x16
+#define RADEON_PCIE_TX_GART_END_HI 0x17
+#define RADEON_PCIE_TX_GART_ERROR 0x18
+
+#define RADEON_SCRATCH_REG0 0x15e0
+#define RADEON_SCRATCH_REG1 0x15e4
+#define RADEON_SCRATCH_REG2 0x15e8
+#define RADEON_SCRATCH_REG3 0x15ec
+#define RADEON_SCRATCH_REG4 0x15f0
+#define RADEON_SCRATCH_REG5 0x15f4
+
+#define RV530_GB_PIPE_SELECT2 0x4124
+
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
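+
+/*
+ * Example (illustrative, not part of this patch): with the helper macros
+ * above, the header 0xc0021000 decodes as RADEON_CP_PACKET_GET_TYPE() -> 3
+ * (RADEON_PACKET_TYPE3), RADEON_CP_PACKET3_GET_OPCODE() -> 0x10
+ * (RADEON_PACKET3_NOP) and RADEON_CP_PACKET_GET_COUNT() -> 2.
+ */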
+
+#define RADEON_VLINE_STAT (1 << 12)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 0000000..6e0f480
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,896 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ * Christian König
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU-accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
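+
+/*
+ * Illustrative round trip (a sketch, not part of this patch), assuming a
+ * valid rdev and a GFX-ring IB with no VM:
+ *
+ *	struct radeon_ib ib;
+ *	int r;
+ *
+ *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
+ *	if (r)
+ *		return r;
+ *	... fill ib.ptr[] and set ib.length_dw ...
+ *	r = radeon_ib_schedule(rdev, &ib, NULL);
+ *	radeon_ib_free(rdev, &ib);
+ */
+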
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @vm: requested vm the IB belongs to, or NULL if none
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int i, r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ ib->sync_to[i] = NULL;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = ib->sync_to[fence->ring];
+ ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ bool need_sync = false;
+ int i, r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+		/* TODO: nothing in the IB, or the ring is not ready; report which. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = ib->sync_to[i];
+ if (radeon_fence_need_sync(fence, ib->ring)) {
+ need_sync = true;
+ radeon_semaphore_sync_rings(rdev, ib->semaphore,
+ fence->ring, ib->ring);
+ radeon_fence_note_sync(fence, ib->ring);
+ }
+ }
+ /* immediately free semaphore when we don't need to sync */
+ if (!need_sync) {
+ radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+ }
+ /* if we can't remember our last VM flush then flush now! */
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
+ radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+ }
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+ /* we just flushed the VM, remember that */
+ if (ib->vm && !ib->vm->last_flush) {
+ ib->vm->last_flush = radeon_fence_ref(ib->fence);
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
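+
+/*
+ * Illustrative SI submission (a sketch): the CS ioctl path builds both IBs
+ * and hands them over together, so the CE IB lands on the ring before the
+ * DE IB; parser stands in for a hypothetical radeon_cs_parser here:
+ *
+ *	r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ */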
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT);
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Rings
+ * Most engines on the GPU are fed via ring buffers. Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written. When the
+ * pointers are equal, the ring is idle. When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr. The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
+ */
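+
+/*
+ * Illustrative usage (a sketch, not part of this patch): emitting two
+ * dwords with the lock/commit helpers below; reg and value are
+ * hypothetical here:
+ *
+ *	r = radeon_ring_lock(rdev, ring, 2);
+ *	if (r)
+ *		return r;
+ *	radeon_ring_write(ring, PACKET0(reg, 0));
+ *	radeon_ring_write(ring, value);
+ *	radeon_ring_unlock_commit(rdev, ring);
+ */
+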
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ switch (ring->idx) {
+ case RADEON_RING_TYPE_GFX_INDEX:
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ /* This works because ring_size is a power of 2 */
+ ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+ ring->ring_free_dw -= ring->wptr;
+ ring->ring_free_dw &= ring->ptr_mask;
+ if (!ring->ring_free_dw) {
+ ring->ring_free_dw = ring->ring_size / 4;
+ }
+}
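+
+/*
+ * Worked example for the power-of-two trick above (illustrative): with a
+ * 4KB ring, ring_size / 4 = 1024 dwords and ptr_mask = 1023. For rptr = 100
+ * and wptr = 900, free = (100 + 1024 - 900) & 1023 = 224; with the pointers
+ * wrapped, rptr = 900 and wptr = 100 gives (900 + 1024 - 100) & 1023 = 800.
+ */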
+
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+ int r;
+
+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ radeon_ring_free_size(rdev, ring);
+ if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* The ring is empty; update the lockup info to avoid a
+		 * false positive.
+		 */
+ radeon_ring_lockup_update(ring);
+ }
+ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ while (ndw > (ring->ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev, ring);
+ if (ndw < ring->ring_free_dw) {
+ break;
+ }
+ r = radeon_fence_wait_next_locked(rdev, ring->idx);
+ if (r)
+ return r;
+ }
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+ return 0;
+}
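+
+/*
+ * Illustrative alignment example: with align_mask = 15 (a 16-dword fetch
+ * granularity), a request for ndw = 10 is rounded up above to
+ * (10 + 15) & ~15 = 16 dwords, so unlock_commit can always pad to a fetch
+ * boundary.
+ */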
+
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+ int r;
+
+ mutex_lock(&rdev->ring_lock);
+ r = radeon_ring_alloc(rdev, ring, ndw);
+ if (r) {
+ mutex_unlock(&rdev->ring_lock);
+ return r;
+ }
+ return 0;
+}
+
+/**
+ * radeon_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ /* We pad to match fetch size */
+ while (ring->wptr & ring->align_mask) {
+ radeon_ring_write(ring, ring->nop);
+ }
+ DRM_MEMORYBARRIER();
+ WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ (void)RREG32(ring->wptr_reg);
+}
+
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_commit(rdev, ring);
+ mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
+void radeon_ring_undo(struct radeon_ring *ring)
+{
+ ring->wptr = ring->wptr_old;
+}
+
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_undo(ring);
+ mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ radeon_ring_free_size(rdev, ring);
+ if (ring->rptr == ring->wptr) {
+ r = radeon_ring_alloc(rdev, ring, 1);
+ if (!r) {
+ radeon_ring_write(ring, ring->nop);
+ radeon_ring_commit(rdev, ring);
+ }
+ }
+}
+
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+ ring->last_rptr = ring->rptr;
+ ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check if the ring is locked up
+ * @rdev: radeon device structure
+ * @ring: radeon_ring structure holding ring information
+ *
+ * There is no need to initialize the lockup tracking information: either the
+ * CP rptr will have moved to a different value, or jiffies will have wrapped
+ * around, and both cases force the tracking information to be updated.
+ *
+ * A false positive is possible if we are called again after a while and the
+ * saved rptr happens to equal the current CP rptr; it is unlikely, but it can
+ * happen. A lockup is only reported once the elapsed time since the last
+ * recorded activity exceeds the lockup timeout, so the caller must call
+ * radeon_ring_test_lockup() several times within that window for a lockup to
+ * be reported; the fencing code should be cautious about that.
+ *
+ * The caller should also write to the ring to force the CP to do something,
+ * so we don't get a false positive when the CP simply has nothing to do.
+ */
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ unsigned long cjiffies, elapsed;
+ uint32_t rptr;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, ring->last_activity)) {
+ /* likely a wrap around */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ if (ring->rptr != ring->last_rptr) {
+ /* CP is still working no lockup */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+ if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
+}
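+
+/*
+ * Illustrative pairing (a sketch, assuming a periodic lockup check): force
+ * some ring activity first so an idle CP isn't mistaken for a hung one,
+ * then test:
+ *
+ *	radeon_ring_force_activity(rdev, ring);
+ *	if (radeon_ring_test_lockup(rdev, ring))
+ *		... ask for a GPU reset ...
+ */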
+
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ * @data: pointer that receives the kmalloc'ed copy of the saved commands
+ *
+ * Saves all unprocessed commands from a ring; returns the number of dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data)
+{
+ unsigned size, ptr, i;
+
+ /* just in case lock the ring */
+ mutex_lock(&rdev->ring_lock);
+ *data = NULL;
+
+ if (ring->ring_obj == NULL) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* it doesn't make sense to save anything if all fences are signaled */
+ if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* calculate the number of dw on the ring */
+ if (ring->rptr_save_reg)
+ ptr = RREG32(ring->rptr_save_reg);
+ else if (rdev->wb.enabled)
+ ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+ else {
+ /* no way to read back the next rptr */
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ size = ring->wptr + (ring->ring_size / 4);
+ size -= ptr;
+ size &= ring->ptr_mask;
+ if (size == 0) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* and then save the content of the ring */
+ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ if (!*data) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+ for (i = 0; i < size; ++i) {
+ (*data)[i] = ring->ring[ptr++];
+ ptr &= ring->ptr_mask;
+ }
+
+ mutex_unlock(&rdev->ring_lock);
+ return size;
+}
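+
+/*
+ * Illustrative backup/restore pairing around a GPU reset (a sketch); note
+ * that radeon_ring_restore() kfree()s the saved data itself:
+ *
+ *	unsigned size;
+ *	uint32_t *data;
+ *
+ *	size = radeon_ring_backup(rdev, ring, &data);
+ *	... reset and reinitialize the GPU ...
+ *	radeon_ring_restore(rdev, ring, size, data);
+ */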
+
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restore the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data)
+{
+ int i, r;
+
+ if (!size || !data)
+ return 0;
+
+ /* restore the saved ring content */
+ r = radeon_ring_lock(rdev, ring, size);
+ if (r)
+ return r;
+
+ for (i = 0; i < size; ++i) {
+ radeon_ring_write(ring, data[i]);
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ kfree(data);
+ return 0;
+}
+
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+{
+ int r;
+
+ ring->ring_size = ring_size;
+ ring->rptr_offs = rptr_offs;
+ ring->rptr_reg = rptr_reg;
+ ring->wptr_reg = wptr_reg;
+ ring->ptr_reg_shift = ptr_reg_shift;
+ ring->ptr_reg_mask = ptr_reg_mask;
+ ring->nop = nop;
+ /* Allocate ring buffer */
+ if (ring->ring_obj == NULL) {
+ r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &ring->ring_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) ring create failed\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(ring->ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &ring->gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(ring->ring_obj);
+ dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(ring->ring_obj,
+ (void **)&ring->ring);
+ radeon_bo_unreserve(ring->ring_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) ring map failed\n", r);
+ return r;
+ }
+ }
+ ring->ptr_mask = (ring->ring_size / 4) - 1;
+ ring->ring_free_dw = ring->ring_size / 4;
+ if (rdev->wb.enabled) {
+ u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+ ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+ ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+ }
+ if (radeon_debugfs_ring_init(rdev, ring)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
+ radeon_ring_lockup_update(ring);
+ return 0;
+}
+
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+ struct radeon_bo *ring_obj;
+
+ mutex_lock(&rdev->ring_lock);
+ ring_obj = ring->ring_obj;
+ ring->ready = false;
+ ring->ring = NULL;
+ ring->ring_obj = NULL;
+ mutex_unlock(&rdev->ring_lock);
+
+ if (ring_obj) {
+ r = radeon_bo_reserve(ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(ring_obj);
+ radeon_bo_unpin(ring_obj);
+ radeon_bo_unreserve(ring_obj);
+ }
+ radeon_bo_unref(&ring_obj);
+ }
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ridx = *(int*)node->info_ent->data;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ unsigned count, i, j;
+ u32 tmp;
+
+ radeon_ring_free_size(rdev, ring);
+ count = (ring->ring_size / 4) - ring->ring_free_dw;
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+ if (ring->rptr_save_reg) {
+ seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+ RREG32(ring->rptr_save_reg));
+ }
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+	/* print 32 dwords before the current rptr, as it's often the last
+	 * executed packet that is the root of the issue
+	 */
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ if (ring->ready) {
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
+ }
+ }
+ return 0;
+}
+
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+ {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
+};
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+ for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+ struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+ int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+ unsigned r;
+
+ if (&rdev->ring[ridx] != ring)
+ continue;
+
+ r = radeon_debugfs_add_files(rdev, info, 1);
+ if (r)
+ return r;
+ }
+#endif
+ return 0;
+}
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 0000000..f0bac68
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", and we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what comes after the last allocation is the oldest bo we
+ * allocated and thus the first one that should no longer be in use by the
+ * GPU.
+ *
+ * If that's not the case, we skip over the bos after the last one to the
+ * closest finished bo, if such a one exists. If none exists and we are not
+ * asked to block, we report failure to allocate.
+ *
+ * If we are asked to block, we collect the oldest fence of every ring and
+ * wait for any one of those fences to complete.
+ */
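+
+/*
+ * Illustrative state (a sketch): with a 16KB manager and live allocations
+ * A [0,4K), B [4K,6K) and C [6K,7K), "hole" points at C and the next
+ * allocation is tried in [7K,16K). Once that range is exhausted the
+ * allocator wraps to offset 0 and tries to reclaim A first: in ring order
+ * A is the oldest allocation, so its fence is the most likely to have
+ * signaled already.
+ */
+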
+#include <drm/drmP.h>
+#include "radeon.h"
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 align, u32 domain)
+{
+ int i, r;
+
+ init_waitqueue_head(&sa_manager->wq);
+ sa_manager->bo = NULL;
+ sa_manager->size = size;
+ sa_manager->domain = domain;
+ sa_manager->align = align;
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+ }
+
+ r = radeon_bo_create(rdev, size, align, true,
+ domain, NULL, &sa_manager->bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (!list_empty(&sa_manager->olist)) {
+		sa_manager->hole = &sa_manager->olist;
+ radeon_sa_bo_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
+ }
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+ radeon_bo_unref(&sa_manager->bo);
+ sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ /* map the buffer */
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(sa_manager->bo);
+ dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ radeon_bo_unreserve(sa_manager->bo);
+ return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (!r) {
+ radeon_bo_kunmap(sa_manager->bo);
+ radeon_bo_unpin(sa_manager->bo);
+ radeon_bo_unreserve(sa_manager->bo);
+ }
+ return r;
+}
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ struct radeon_sa_manager *sa_manager = sa_bo->manager;
+ if (sa_manager->hole == &sa_bo->olist) {
+ sa_manager->hole = sa_bo->olist.prev;
+ }
+ list_del_init(&sa_bo->olist);
+ list_del_init(&sa_bo->flist);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+ if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+ return;
+ }
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist) {
+ return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+ }
+ return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist) {
+ return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+ }
+ return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa_bo->manager = sa_manager;
+ sa_bo->soffset = soffset;
+ sa_bo->eoffset = soffset + size;
+ list_add(&sa_bo->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa_bo->flist);
+ sa_manager->hole = &sa_bo->olist;
+ return true;
+ }
+ return false;
+}
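+
+/*
+ * Worked alignment example for the check above (illustrative): for
+ * soffset = 100 and align = 64, wasted = (64 - (100 % 64)) % 64 = 28 and
+ * the allocation starts at offset 128; when soffset is already aligned,
+ * the outer modulo makes wasted 0.
+ */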
+
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!list_empty(&sa_manager->flist[i])) {
+ return true;
+ }
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+ struct radeon_fence **fences,
+ unsigned *tries)
+{
+ struct radeon_sa_bo *best_bo = NULL;
+ unsigned i, soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+	/* go over all fence lists and try to find the sa_bo closest
+	 * to the current hole
+ */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_sa_bo *sa_bo;
+
+ if (list_empty(&sa_manager->flist[i])) {
+ continue;
+ }
+
+ sa_bo = list_first_entry(&sa_manager->flist[i],
+ struct radeon_sa_bo, flist);
+
+ if (!radeon_fence_signaled(sa_bo->fence)) {
+ fences[i] = sa_bo->fence;
+ continue;
+ }
+
+ /* limit the number of tries each ring gets */
+ if (tries[i] > 2) {
+ continue;
+ }
+
+ tmp = sa_bo->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_bo = sa_bo;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_bo->fence->ring];
+ sa_manager->hole = best_bo->olist.prev;
+
+		/* we know that this one is signaled,
+		   so it's safe to remove it */
+ radeon_sa_bo_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
+int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block)
+{
+ struct radeon_fence *fences[RADEON_NUM_RINGS];
+ unsigned tries[RADEON_NUM_RINGS];
+ int i, r;
+
+ BUG_ON(align > sa_manager->align);
+ BUG_ON(size > sa_manager->size);
+
+ *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ if ((*sa_bo) == NULL) {
+ return -ENOMEM;
+ }
+ (*sa_bo)->manager = sa_manager;
+ (*sa_bo)->fence = NULL;
+ INIT_LIST_HEAD(&(*sa_bo)->olist);
+ INIT_LIST_HEAD(&(*sa_bo)->flist);
+
+ spin_lock(&sa_manager->wq.lock);
+ do {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ fences[i] = NULL;
+ tries[i] = 0;
+ }
+
+ do {
+ radeon_sa_bo_try_free(sa_manager);
+
+ if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+ size, align)) {
+ spin_unlock(&sa_manager->wq.lock);
+ return 0;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+ spin_unlock(&sa_manager->wq.lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->wq.lock);
+		/* if we have nothing to wait for, block */
+ if (r == -ENOENT && block) {
+ r = wait_event_interruptible_locked(
+ sa_manager->wq,
+ radeon_sa_event(sa_manager, size, align)
+ );
+
+ } else if (r == -ENOENT) {
+ r = -ENOMEM;
+ }
+
+ } while (!r);
+
+ spin_unlock(&sa_manager->wq.lock);
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
+{
+ struct radeon_sa_manager *sa_manager;
+
+ if (sa_bo == NULL || *sa_bo == NULL) {
+ return;
+ }
+
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !radeon_fence_signaled(fence)) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ list_add_tail(&(*sa_bo)->flist,
+ &sa_manager->flist[fence->ring]);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
+ *sa_bo = NULL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m)
+{
+ struct radeon_sa_bo *i;
+
+ spin_lock(&sa_manager->wq.lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ if (&i->olist == sa_manager->hole) {
+ seq_printf(m, ">");
+ } else {
+ seq_printf(m, " ");
+ }
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
+ if (i->fence) {
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ i->fence->seq, i->fence->ring);
+ }
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&sa_manager->wq.lock);
+}
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 0000000..8dcc20f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore)
+{
+ int r;
+
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
+ if (*semaphore == NULL) {
+ return -ENOMEM;
+ }
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8, true);
+ if (r) {
+ kfree(*semaphore);
+ *semaphore = NULL;
+ return r;
+ }
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+ return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ --semaphore->waiters;
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ ++semaphore->waiters;
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+/* caller must hold ring lock */
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ int signaler, int waiter)
+{
+ int r;
+
+ /* no need to signal and wait on the same ring */
+ if (signaler == waiter) {
+ return 0;
+ }
+
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[signaler].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+ return -EINVAL;
+ }
+
+ r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+ if (r) {
+ return r;
+ }
+ radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+ radeon_ring_commit(rdev, &rdev->ring[signaler]);
+
+	/* we assume the caller has already allocated space on the waiter's ring */
+ radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+
+ /* for debugging lockup only, used by sysfs debug files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
+ return 0;
+}
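+
+/*
+ * Illustrative usage (a sketch): make the GFX ring wait for work previously
+ * submitted on the r600 DMA ring, with the ring lock held and space already
+ * allocated on the waiter's ring:
+ *
+ *	r = radeon_semaphore_sync_rings(rdev, sem,
+ *					R600_RING_TYPE_DMA_INDEX,
+ *					RADEON_RING_TYPE_GFX_INDEX);
+ */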
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore,
+ struct radeon_fence *fence)
+{
+ if (semaphore == NULL || *semaphore == NULL) {
+ return;
+ }
+ if ((*semaphore)->waiters > 0) {
+ dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", *semaphore);
+ }
+ radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+ kfree(*semaphore);
+ *semaphore = NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
new file mode 100644
index 0000000..4d20910
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -0,0 +1,3261 @@
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_buffer.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* ================================================================
+ * Helper functions for client state checking and fixup
+ */
+
+static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
+ dev_priv,
+ struct drm_file * file_priv,
+ u32 *offset)
+{
+ u64 off = *offset;
+ u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ /* Hrm ... the story of the offset ... So this function converts
+ * the various ideas of what userland clients might have for an
+ * offset in the card address space into an offset into the card
+ * address space :) So with a sane client, it should just keep
+ * the value intact and just do some boundary checking. However,
+ * not all clients are sane. Some older clients pass us 0 based
+ * offsets relative to the start of the framebuffer and some may
+	 * assume the AGP aperture is appended to the framebuffer, so we
+ * try to detect those cases and fix them up.
+ *
+ * Note: It might be a good idea here to make sure the offset lands
+ * in some "allowed" area to protect things like the PCIE GART...
+ */
+
+ /* First, the best case, the offset already lands in either the
+ * framebuffer or the GART mapped space
+ */
+ if (radeon_check_offset(dev_priv, off))
+ return 0;
+
+ /* Ok, that didn't happen... now check if we have a zero based
+ * offset that fits in the framebuffer + gart space, apply the
+ * magic offset we get from SETPARAM or calculated from fb_location
+ */
+ if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+ radeon_priv = file_priv->driver_priv;
+ off += radeon_priv->radeon_fb_delta;
+ }
+
+ /* Finally, assume we aimed at a GART offset if beyond the fb */
+ if (off > fb_end)
+ off = off - fb_end - 1 + dev_priv->gart_vm_start;
+
+ /* Now recheck and fail if out of bounds */
+ if (radeon_check_offset(dev_priv, off)) {
+ DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
+ *offset = off;
+ return 0;
+ }
+ return -EINVAL;
+}
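+
+/*
+ * Worked example (illustrative, with hypothetical values): for
+ * fb_location = 0xd0000000 and fb_size = 0x08000000, a legacy client that
+ * passes the zero-based offset 0x1000 first gets radeon_fb_delta (typically
+ * the framebuffer base) added, giving 0xd0001000, which then passes the
+ * bounds recheck unchanged.
+ */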
+
+static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
+ dev_priv,
+ struct drm_file *file_priv,
+ int id, struct drm_buffer *buf)
+{
+ u32 *data;
+ switch (id) {
+
+ case RADEON_EMIT_PP_MISC:
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return -EINVAL;
+ }
+ dev_priv->have_z_offset = 1;
+ break;
+
+ case RADEON_EMIT_PP_CNTL:
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+ DRM_ERROR("Invalid colour buffer offset\n");
+ return -EINVAL;
+ }
+ break;
+
+ case R200_EMIT_PP_TXOFFSET_0:
+ case R200_EMIT_PP_TXOFFSET_1:
+ case R200_EMIT_PP_TXOFFSET_2:
+ case R200_EMIT_PP_TXOFFSET_3:
+ case R200_EMIT_PP_TXOFFSET_4:
+ case R200_EMIT_PP_TXOFFSET_5:
+ data = drm_buffer_pointer_to_dword(buf, 0);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+ DRM_ERROR("Invalid R200 texture offset\n");
+ return -EINVAL;
+ }
+ break;
+
+ case RADEON_EMIT_PP_TXFILTER_0:
+ case RADEON_EMIT_PP_TXFILTER_1:
+ case RADEON_EMIT_PP_TXFILTER_2:
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+ DRM_ERROR("Invalid R100 texture offset\n");
+ return -EINVAL;
+ }
+ break;
+
+ case R200_EMIT_PP_CUBIC_OFFSETS_0:
+ case R200_EMIT_PP_CUBIC_OFFSETS_1:
+ case R200_EMIT_PP_CUBIC_OFFSETS_2:
+ case R200_EMIT_PP_CUBIC_OFFSETS_3:
+ case R200_EMIT_PP_CUBIC_OFFSETS_4:
+ case R200_EMIT_PP_CUBIC_OFFSETS_5:{
+ int i;
+ for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
+ if (radeon_check_and_fixup_offset(dev_priv,
+ file_priv,
+ data)) {
+ DRM_ERROR
+ ("Invalid R200 cubic texture offset\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ }
+
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
+ case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
+ int i;
+ for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
+ if (radeon_check_and_fixup_offset(dev_priv,
+ file_priv,
+ data)) {
+ DRM_ERROR
+ ("Invalid R100 cubic texture offset\n");
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+
+ case R200_EMIT_VAP_CTL:{
+ RING_LOCALS;
+ BEGIN_RING(2);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ ADVANCE_RING();
+ }
+ break;
+
+ case RADEON_EMIT_RB3D_COLORPITCH:
+ case RADEON_EMIT_RE_LINE_PATTERN:
+ case RADEON_EMIT_SE_LINE_WIDTH:
+ case RADEON_EMIT_PP_LUM_MATRIX:
+ case RADEON_EMIT_PP_ROT_MATRIX_0:
+ case RADEON_EMIT_RB3D_STENCILREFMASK:
+ case RADEON_EMIT_SE_VPORT_XSCALE:
+ case RADEON_EMIT_SE_CNTL:
+ case RADEON_EMIT_SE_CNTL_STATUS:
+ case RADEON_EMIT_RE_MISC:
+ case RADEON_EMIT_PP_BORDER_COLOR_0:
+ case RADEON_EMIT_PP_BORDER_COLOR_1:
+ case RADEON_EMIT_PP_BORDER_COLOR_2:
+ case RADEON_EMIT_SE_ZBIAS_FACTOR:
+ case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
+ case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
+ case R200_EMIT_PP_TXCBLEND_0:
+ case R200_EMIT_PP_TXCBLEND_1:
+ case R200_EMIT_PP_TXCBLEND_2:
+ case R200_EMIT_PP_TXCBLEND_3:
+ case R200_EMIT_PP_TXCBLEND_4:
+ case R200_EMIT_PP_TXCBLEND_5:
+ case R200_EMIT_PP_TXCBLEND_6:
+ case R200_EMIT_PP_TXCBLEND_7:
+ case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
+ case R200_EMIT_TFACTOR_0:
+ case R200_EMIT_VTX_FMT_0:
+ case R200_EMIT_MATRIX_SELECT_0:
+ case R200_EMIT_TEX_PROC_CTL_2:
+ case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
+ case R200_EMIT_PP_TXFILTER_0:
+ case R200_EMIT_PP_TXFILTER_1:
+ case R200_EMIT_PP_TXFILTER_2:
+ case R200_EMIT_PP_TXFILTER_3:
+ case R200_EMIT_PP_TXFILTER_4:
+ case R200_EMIT_PP_TXFILTER_5:
+ case R200_EMIT_VTE_CNTL:
+ case R200_EMIT_OUTPUT_VTX_COMP_SEL:
+ case R200_EMIT_PP_TAM_DEBUG3:
+ case R200_EMIT_PP_CNTL_X:
+ case R200_EMIT_RB3D_DEPTHXY_OFFSET:
+ case R200_EMIT_RE_AUX_SCISSOR_CNTL:
+ case R200_EMIT_RE_SCISSOR_TL_0:
+ case R200_EMIT_RE_SCISSOR_TL_1:
+ case R200_EMIT_RE_SCISSOR_TL_2:
+ case R200_EMIT_SE_VAP_CNTL_STATUS:
+ case R200_EMIT_SE_VTX_STATE_CNTL:
+ case R200_EMIT_RE_POINTSIZE:
+ case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
+ case R200_EMIT_PP_CUBIC_FACES_0:
+ case R200_EMIT_PP_CUBIC_FACES_1:
+ case R200_EMIT_PP_CUBIC_FACES_2:
+ case R200_EMIT_PP_CUBIC_FACES_3:
+ case R200_EMIT_PP_CUBIC_FACES_4:
+ case R200_EMIT_PP_CUBIC_FACES_5:
+ case RADEON_EMIT_PP_TEX_SIZE_0:
+ case RADEON_EMIT_PP_TEX_SIZE_1:
+ case RADEON_EMIT_PP_TEX_SIZE_2:
+ case R200_EMIT_RB3D_BLENDCOLOR:
+ case R200_EMIT_TCL_POINT_SPRITE_CNTL:
+ case RADEON_EMIT_PP_CUBIC_FACES_0:
+ case RADEON_EMIT_PP_CUBIC_FACES_1:
+ case RADEON_EMIT_PP_CUBIC_FACES_2:
+ case R200_EMIT_PP_TRI_PERF_CNTL:
+ case R200_EMIT_PP_AFS_0:
+ case R200_EMIT_PP_AFS_1:
+ case R200_EMIT_ATF_TFACTOR:
+ case R200_EMIT_PP_TXCTLALL_0:
+ case R200_EMIT_PP_TXCTLALL_1:
+ case R200_EMIT_PP_TXCTLALL_2:
+ case R200_EMIT_PP_TXCTLALL_3:
+ case R200_EMIT_PP_TXCTLALL_4:
+ case R200_EMIT_PP_TXCTLALL_5:
+ case R200_EMIT_VAP_PVS_CNTL:
+ /* These packets don't contain memory offsets */
+ break;
+
+ default:
+ DRM_ERROR("Unknown state packet ID %d\n", id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+ dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *
+ cmdbuf,
+ unsigned int *cmdsz)
+{
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ u32 offset, narrays;
+ int count, i, k;
+
+ count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ *cmdsz = 2 + count;
+
+ if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
+ DRM_ERROR("Not a type 3 packet\n");
+ return -EINVAL;
+ }
+
+ if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
+ DRM_ERROR("Packet size larger than size of data provided\n");
+ return -EINVAL;
+ }
+
+ switch (*cmd & 0xff00) {
+ /* XXX Are there old drivers needing other packets? */
+
+ case RADEON_3D_DRAW_IMMD:
+ case RADEON_3D_DRAW_VBUF:
+ case RADEON_3D_DRAW_INDX:
+ case RADEON_WAIT_FOR_IDLE:
+ case RADEON_CP_NOP:
+ case RADEON_3D_CLEAR_ZMASK:
+/* case RADEON_CP_NEXT_CHAR:
+ case RADEON_CP_PLY_NEXTSCAN:
+ case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
+ /* these packets are safe */
+ break;
+
+ case RADEON_CP_3D_DRAW_IMMD_2:
+ case RADEON_CP_3D_DRAW_VBUF_2:
+ case RADEON_CP_3D_DRAW_INDX_2:
+ case RADEON_3D_CLEAR_HIZ:
+ /* safe but r200 only */
+ if (dev_priv->microcode_version != UCODE_R200) {
+ DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ return -EINVAL;
+ }
+ break;
+
+ case RADEON_3D_LOAD_VBPNTR:
+
+ if (count > 18) { /* 12 arrays max */
+ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+ count);
+ return -EINVAL;
+ }
+
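+ /* Payload layout: dword 1 packs the array count, then each pair of
+ * arrays takes one shared attribute dword plus one offset dword per
+ * array -- so the 12-array maximum works out to the count cap of 18
+ * (1 + 6 + 12 = 19 data dwords) checked above.
+ */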
+ /* carefully check packet contents */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+ narrays = *cmd & ~0xc000;
+ k = 0;
+ i = 2;
+ while ((k < narrays) && (i < (count + 2))) {
+ i++; /* skip attribute field */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ cmd)) {
+ DRM_ERROR
+ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return -EINVAL;
+ }
+ k++;
+ i++;
+ if (k == narrays)
+ break;
+ /* have one more to process, they come in pairs */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
+ if (radeon_check_and_fixup_offset(dev_priv,
+ file_priv, cmd)) {
+ DRM_ERROR
+ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+ k, i);
+ return -EINVAL;
+ }
+ k++;
+ i++;
+ }
+ /* do the counts match what we expect? */
+ if ((k != narrays) || (i != (count + 2))) {
+ DRM_ERROR
+ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+2=%d).\n",
+ k, i, narrays, count + 2);
+ return -EINVAL;
+ }
+ break;
+
+ case RADEON_3D_RNDR_GEN_INDX_PRIM:
+ if (dev_priv->microcode_version != UCODE_R100) {
+ DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+ return -EINVAL;
+ }
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+ DRM_ERROR("Invalid rndr_gen_indx offset\n");
+ return -EINVAL;
+ }
+ break;
+
+ case RADEON_CP_INDX_BUFFER:
+ if (dev_priv->microcode_version != UCODE_R200) {
+ DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+ return -EINVAL;
+ }
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if ((*cmd & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
+ return -EINVAL;
+ }
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
+ return -EINVAL;
+ }
+ break;
+
+ case RADEON_CNTL_HOSTDATA_BLT:
+ case RADEON_CNTL_PAINT_MULTI:
+ case RADEON_CNTL_BITBLT_MULTI:
+ /* MSB of opcode: next DWORD GUI_CNTL */
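+ /* The pitch/offset dwords patched here keep the pitch in bits
+ * [31:22] and the offset, in 1 KiB units, in bits [21:0] -- hence
+ * the << 10 / >> 10 conversions around the offset check.
+ */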
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
+ if (radeon_check_and_fixup_offset
+ (dev_priv, file_priv, &offset)) {
+ DRM_ERROR("Invalid first packet offset\n");
+ return -EINVAL;
+ }
+ *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
+ }
+
+ if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
+ if (radeon_check_and_fixup_offset
+ (dev_priv, file_priv, &offset)) {
+ DRM_ERROR("Invalid second packet offset\n");
+ return -EINVAL;
+ }
+ *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
+ }
+ break;
+
+ default:
+ DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* ================================================================
+ * CP hardware state programming functions
+ */
+
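+/* Despite the RE_WIDTH_HEIGHT name, the second register appears to take
+ * inclusive bottom-right coordinates, hence the -1 adjustments below.
+ */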
+static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+ struct drm_clip_rect * box)
+{
+ RING_LOCALS;
+
+ DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
+ OUT_RING((box->y1 << 16) | box->x1);
+ OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
+ OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
+ ADVANCE_RING();
+}
+
+/* Emit 1.1 state
+ */
+static int radeon_emit_state(drm_radeon_private_t * dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_context_regs_t * ctx,
+ drm_radeon_texture_regs_t * tex,
+ unsigned int dirty)
+{
+ RING_LOCALS;
+ DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+ if (dirty & RADEON_UPLOAD_CONTEXT) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &ctx->rb3d_depthoffset)) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return -EINVAL;
+ }
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &ctx->rb3d_coloroffset)) {
+ DRM_ERROR("Invalid depth buffer offset\n");
+ return -EINVAL;
+ }
+
+ BEGIN_RING(14);
+ OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
+ OUT_RING(ctx->pp_misc);
+ OUT_RING(ctx->pp_fog_color);
+ OUT_RING(ctx->re_solid_color);
+ OUT_RING(ctx->rb3d_blendcntl);
+ OUT_RING(ctx->rb3d_depthoffset);
+ OUT_RING(ctx->rb3d_depthpitch);
+ OUT_RING(ctx->rb3d_zstencilcntl);
+ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
+ OUT_RING(ctx->pp_cntl);
+ OUT_RING(ctx->rb3d_cntl);
+ OUT_RING(ctx->rb3d_coloroffset);
+ OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
+ OUT_RING(ctx->rb3d_colorpitch);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_VERTFMT) {
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
+ OUT_RING(ctx->se_coord_fmt);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_LINE) {
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
+ OUT_RING(ctx->re_line_pattern);
+ OUT_RING(ctx->re_line_state);
+ OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
+ OUT_RING(ctx->se_line_width);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_BUMPMAP) {
+ BEGIN_RING(5);
+ OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
+ OUT_RING(ctx->pp_lum_matrix);
+ OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
+ OUT_RING(ctx->pp_rot_matrix_0);
+ OUT_RING(ctx->pp_rot_matrix_1);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_MASKS) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
+ OUT_RING(ctx->rb3d_stencilrefmask);
+ OUT_RING(ctx->rb3d_ropcntl);
+ OUT_RING(ctx->rb3d_planemask);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_VIEWPORT) {
+ BEGIN_RING(7);
+ OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
+ OUT_RING(ctx->se_vport_xscale);
+ OUT_RING(ctx->se_vport_xoffset);
+ OUT_RING(ctx->se_vport_yscale);
+ OUT_RING(ctx->se_vport_yoffset);
+ OUT_RING(ctx->se_vport_zscale);
+ OUT_RING(ctx->se_vport_zoffset);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_SETUP) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
+ OUT_RING(ctx->se_cntl);
+ OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
+ OUT_RING(ctx->se_cntl_status);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_MISC) {
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
+ OUT_RING(ctx->re_misc);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX0) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &tex[0].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 0\n");
+ return -EINVAL;
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
+ OUT_RING(tex[0].pp_txfilter);
+ OUT_RING(tex[0].pp_txformat);
+ OUT_RING(tex[0].pp_txoffset);
+ OUT_RING(tex[0].pp_txcblend);
+ OUT_RING(tex[0].pp_txablend);
+ OUT_RING(tex[0].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
+ OUT_RING(tex[0].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX1) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &tex[1].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 1\n");
+ return -EINVAL;
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
+ OUT_RING(tex[1].pp_txfilter);
+ OUT_RING(tex[1].pp_txformat);
+ OUT_RING(tex[1].pp_txoffset);
+ OUT_RING(tex[1].pp_txcblend);
+ OUT_RING(tex[1].pp_txablend);
+ OUT_RING(tex[1].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
+ OUT_RING(tex[1].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ if (dirty & RADEON_UPLOAD_TEX2) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &tex[2].pp_txoffset)) {
+ DRM_ERROR("Invalid texture offset for unit 2\n");
+ return -EINVAL;
+ }
+
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
+ OUT_RING(tex[2].pp_txfilter);
+ OUT_RING(tex[2].pp_txformat);
+ OUT_RING(tex[2].pp_txoffset);
+ OUT_RING(tex[2].pp_txcblend);
+ OUT_RING(tex[2].pp_txablend);
+ OUT_RING(tex[2].pp_tfactor);
+ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
+ OUT_RING(tex[2].pp_border_color);
+ ADVANCE_RING();
+ }
+
+ return 0;
+}
+
+/* Emit 1.2 state
+ */
+static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_state_t * state)
+{
+ RING_LOCALS;
+
+ if (state->dirty & RADEON_UPLOAD_ZBIAS) {
+ BEGIN_RING(3);
+ OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
+ OUT_RING(state->context2.se_zbias_factor);
+ OUT_RING(state->context2.se_zbias_constant);
+ ADVANCE_RING();
+ }
+
+ return radeon_emit_state(dev_priv, file_priv, &state->context,
+ state->tex, state->dirty);
+}
+
+/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
+ * 1.3 cmdbuffers allow all previous state to be updated as well as
+ * the tcl scalar and vector areas.
+ */
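+/* Indexed by the RADEON_EMIT_* and R200_EMIT_* packet IDs validated
+ * above; .start is the first register written and .len the number of
+ * consecutive dwords that follow.
+ */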
+static struct {
+ int start;
+ int len;
+ const char *name;
+} packet[RADEON_MAX_STATE_PACKETS] = {
+ {RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
+ {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
+ {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
+ {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
+ {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
+ {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
+ {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
+ {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
+ {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
+ {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
+ {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
+ {RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
+ {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
+ {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
+ {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
+ {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
+ {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
+ {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
+ {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
+ {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
+ {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
+ "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
+ {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
+ {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
+ {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
+ {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
+ {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
+ {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
+ {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
+ {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
+ {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
+ {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
+ {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
+ {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
+ {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
+ {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
+ {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
+ {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
+ {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
+ {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
+ {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
+ {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
+ {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
+ {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
+ {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
+ {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
+ {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
+ {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
+ {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
+ {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
+ {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+ "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+ {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
+ {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
+ {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
+ {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
+ {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
+ {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
+ {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
+ {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
+ {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
+ {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
+ {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
+ "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
+ {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
+ {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+ {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
+ {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
+ {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
+ {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
+ {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
+ {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
+ {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
+ {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
+ {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
+ {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
+ {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
+ {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
+ {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
+ {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
+ {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
+ {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
+ {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
+ {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
+ {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
+ {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
+ {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
+ {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
+ {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
+ {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
+ {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
+ {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
+ {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
+ {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
+ {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
+ {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
+ {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
+ {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
+};
+
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+ struct drm_radeon_master_private *master_priv,
+ int x, int y, int w, int h, int r, int g, int b)
+{
+ u32 color;
+ RING_LOCALS;
+
+ x += master_priv->sarea_priv->boxes[0].x1;
+ y += master_priv->sarea_priv->boxes[0].y1;
+
+ switch (dev_priv->color_fmt) {
+ case RADEON_COLOR_FORMAT_RGB565:
+ color = (((r & 0xf8) << 8) |
+ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+ break;
+ case RADEON_COLOR_FORMAT_ARGB8888:
+ default:
+ color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+ break;
+ }
+
+ BEGIN_RING(4);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+ OUT_RING(0xffffffff);
+ ADVANCE_RING();
+
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ if (master_priv->sarea_priv->pfCurrentPage == 1) {
+ OUT_RING(dev_priv->front_pitch_offset);
+ } else {
+ OUT_RING(dev_priv->back_pitch_offset);
+ }
+
+ OUT_RING(color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
+{
+ /* Collapse various things into a wait flag -- trying to
+ * guess if userspace slept -- better just to have them tell us.
+ */
+ if (dev_priv->stats.last_frame_reads > 1 ||
+ dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+ }
+
+ if (dev_priv->stats.freelist_loops) {
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+ }
+
+ /* Purple box for page flipping
+ */
+ if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
+ radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
+
+ /* Red box if we have to wait for idle at any point
+ */
+ if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
+ radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
+
+ /* Blue box: lost context?
+ */
+
+ /* Yellow box for texture swaps
+ */
+ if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
+ radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
+
+ /* Green box if hardware never idles (as far as we can tell)
+ */
+ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
+ radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
+
+ /* Draw bars indicating number of buffers allocated
+ * (not a great measure, easily confused)
+ */
+ if (dev_priv->stats.requested_bufs) {
+ if (dev_priv->stats.requested_bufs > 100)
+ dev_priv->stats.requested_bufs = 100;
+
+ radeon_clear_box(dev_priv, master_priv, 4, 16,
+ dev_priv->stats.requested_bufs, 4,
+ 196, 128, 128);
+ }
+
+ memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
+}
+
+/* ================================================================
+ * CP command dispatch functions
+ */
+
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
+ struct drm_master *master,
+ drm_radeon_clear_t * clear,
+ drm_radeon_clear_rect_t * depth_boxes)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
+ int nbox = sarea_priv->nbox;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
+ unsigned int flags = clear->flags;
+ u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG("flags = 0x%x\n", flags);
+
+ dev_priv->stats.clears++;
+
+ if (sarea_priv->pfCurrentPage == 1) {
+ unsigned int tmp = flags;
+
+ flags &= ~(RADEON_FRONT | RADEON_BACK);
+ if (tmp & RADEON_FRONT)
+ flags |= RADEON_BACK;
+ if (tmp & RADEON_BACK)
+ flags |= RADEON_FRONT;
+ }
+ if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+ if (!dev_priv->have_z_offset) {
+ printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
+ }
+
+ if (flags & (RADEON_FRONT | RADEON_BACK)) {
+
+ BEGIN_RING(4);
+
+ /* Ensure the 3D stream is idle before doing a
+ * 2D fill to clear the front or back buffer.
+ */
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+ OUT_RING(clear->color_mask);
+
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time.
+ */
+ sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
+ x, y, w, h, flags);
+
+ if (flags & RADEON_FRONT) {
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ OUT_RING(dev_priv->front_pitch_offset);
+ OUT_RING(clear->clear_color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+
+ if (flags & RADEON_BACK) {
+ BEGIN_RING(6);
+
+ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_SOLID_COLOR |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_P |
+ RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+ OUT_RING(dev_priv->back_pitch_offset);
+ OUT_RING(clear->clear_color);
+
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+ }
+ }
+
+ /* hyper z clear */
+ /* no docs available, based on reverse engineering by Stephane Marchesin */
+ if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
+ && (flags & RADEON_CLEAR_FASTZ)) {
+
+ int i;
+ int depthpixperline =
+ (dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z) ?
+ (dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);
+
+ u32 clearmask;
+
+ u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
+ ((clear->depth_mask & 0xff) << 24);
+
+ /* Make sure we restore the 3D state next time.
+ * we haven't touched any "normal" state - still need this?
+ */
+ sarea_priv->ctx_owner = 0;
+
+ if ((dev_priv->flags & RADEON_HAS_HIERZ)
+ && (flags & RADEON_USE_HIERZ)) {
+ /* FIXME : reverse engineer that for Rx00 cards */
+ /* FIXME : the mask supposedly contains low-res z values. So can't set
+ just to the max (0xff? or actually 0x3fff?), need to take z clear
+ value into account? */
+ /* pattern seems to work for r100, though get slight
+ rendering errors with glxgears. If hierz is not enabled for r100,
+ only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
+ other ones are ignored, and the same clear mask can be used. That's
+ very different behaviour than R200 which needs different clear mask
+ and different number of tiles to clear if hierz is enabled or not !?!
+ */
+ clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
+ } else {
+ /* clear mask : chooses the clearing pattern.
+ rv250: could be used to clear only parts of macrotiles
+ (but that would get really complicated...)?
+ bits 0 and 1 (either or both of them ?!?!) are used to
+ not clear tile 0 (or maybe one of the bits indicates if the tile is
+ compressed or not), bits 2 and 3 to not clear tile 1, and so on.
+ Pattern is as follows:
+ | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
+ bits -------------------------------------------------
+ | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
+ rv100: clearmask covers 2x8 4x1 tiles, but one clear still
+ covers 256 pixels ?!?
+ */
+ clearmask = 0x0;
+ }
+
+ BEGIN_RING(8);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
+ tempRB3D_DEPTHCLEARVALUE);
+ /* what offset is this exactly ? */
+ OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
+ /* need ctlstat, otherwise get some strange black flickering */
+ OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
+ RADEON_RB3D_ZC_FLUSH_ALL);
+ ADVANCE_RING();
+
+ for (i = 0; i < nbox; i++) {
+ int tileoffset, nrtilesx, nrtilesy, j;
+ /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
+ if ((dev_priv->flags & RADEON_HAS_HIERZ)
+ && !(dev_priv->microcode_version == UCODE_R200)) {
+ /* FIXME : figure this out for r200 (when hierz is enabled). Or
+ maybe r200 actually doesn't need to put the low-res z value into
+ the tile cache like r100, but just needs to clear the hi-level z-buffer?
+ Works for R100, both with hierz and without.
+ R100 seems to operate on 2x1 8x8 tiles, but...
+ odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
+ problematic with resolutions which are not 64 pix aligned? */
+ tileoffset =
+ ((pbox[i].y1 >> 3) * depthpixperline +
+ pbox[i].x1) >> 6;
+ nrtilesx =
+ ((pbox[i].x2 & ~63) -
+ (pbox[i].x1 & ~63)) >> 4;
+ nrtilesy =
+ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ /* first tile */
+ OUT_RING(tileoffset * 8);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 4);
+ /* clear mask : chooses the clearing pattern. */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 6;
+ }
+ } else if (dev_priv->microcode_version == UCODE_R200) {
+ /* works for rv250. */
+ /* find first macro tile (8x2 4x4 z-pixels on rv250) */
+ tileoffset =
+ ((pbox[i].y1 >> 3) * depthpixperline +
+ pbox[i].x1) >> 5;
+ nrtilesx =
+ (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
+ nrtilesy =
+ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ /* first tile */
+ /* judging by the first tile offset needed, could possibly
+ directly address/clear 4x4 tiles instead of 8x2 * 4x4
+ macro tiles, though would still need clear mask for
+ right/bottom if truly 4x4 granularity is desired ? */
+ OUT_RING(tileoffset * 16);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 1);
+ /* clear mask : chooses the clearing pattern. */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 5;
+ }
+ } else { /* rv 100 */
+ /* rv100 might not need 64 pix alignment, who knows */
+ /* offsets are, hmm, weird */
+ tileoffset =
+ ((pbox[i].y1 >> 4) * depthpixperline +
+ pbox[i].x1) >> 6;
+ nrtilesx =
+ ((pbox[i].x2 & ~63) -
+ (pbox[i].x1 & ~63)) >> 4;
+ nrtilesy =
+ (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
+ for (j = 0; j <= nrtilesy; j++) {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3
+ (RADEON_3D_CLEAR_ZMASK, 2));
+ OUT_RING(tileoffset * 128);
+ /* the number of tiles to clear */
+ OUT_RING(nrtilesx + 4);
+ /* clear mask : chooses the clearing pattern. */
+ OUT_RING(clearmask);
+ ADVANCE_RING();
+ tileoffset += depthpixperline >> 6;
+ }
+ }
+ }
+
+ /* TODO don't always clear all hi-level z tiles */
+ if ((dev_priv->flags & RADEON_HAS_HIERZ)
+ && (dev_priv->microcode_version == UCODE_R200)
+ && (flags & RADEON_USE_HIERZ))
+ /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
+ /* FIXME : the mask supposedly contains low-res z values. So can't set
+ just to the max (0xff? or actually 0x3fff?), need to take z clear
+ value into account? */
+ {
+ BEGIN_RING(4);
+ OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
+ OUT_RING(0x0); /* First tile */
+ OUT_RING(0x3cc0);
+ OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
+ ADVANCE_RING();
+ }
+ }
+
+ /* We have to clear the depth and/or stencil buffers by
+ * rendering a quad into just those buffers. Thus, we have to
+ * make sure the 3D engine is configured correctly.
+ */
+ else if ((dev_priv->microcode_version == UCODE_R200) &&
+ (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+ int tempPP_CNTL;
+ int tempRE_CNTL;
+ int tempRB3D_CNTL;
+ int tempRB3D_ZSTENCILCNTL;
+ int tempRB3D_STENCILREFMASK;
+ int tempRB3D_PLANEMASK;
+ int tempSE_CNTL;
+ int tempSE_VTE_CNTL;
+ int tempSE_VTX_FMT_0;
+ int tempSE_VTX_FMT_1;
+ int tempSE_VAP_CNTL;
+ int tempRE_AUX_SCISSOR_CNTL;
+
+ tempPP_CNTL = 0;
+ tempRE_CNTL = 0;
+
+ tempRB3D_CNTL = depth_clear->rb3d_cntl;
+
+ tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+ tempRB3D_STENCILREFMASK = 0x0;
+
+ tempSE_CNTL = depth_clear->se_cntl;
+
+ /* Disable TCL */
+
+ tempSE_VAP_CNTL = ( /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
+ (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
+
+ tempRB3D_PLANEMASK = 0x0;
+
+ tempRE_AUX_SCISSOR_CNTL = 0x0;
+
+ tempSE_VTE_CNTL =
+ SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
+
+ /* Vertex format (X, Y, Z, W) */
+ tempSE_VTX_FMT_0 =
+ SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
+ SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
+ tempSE_VTX_FMT_1 = 0x0;
+
+ /*
+ * Depth buffer specific enables
+ */
+ if (flags & RADEON_DEPTH) {
+ /* Enable depth buffer */
+ tempRB3D_CNTL |= RADEON_Z_ENABLE;
+ } else {
+ /* Disable depth buffer */
+ tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
+ }
+
+ /*
+ * Stencil buffer specific enables
+ */
+ if (flags & RADEON_STENCIL) {
+ tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
+ tempRB3D_STENCILREFMASK = clear->depth_mask;
+ } else {
+ tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
+ tempRB3D_STENCILREFMASK = 0x00000000;
+ }
+
+ if (flags & RADEON_USE_COMP_ZBUF) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+ RADEON_Z_DECOMPRESSION_ENABLE;
+ }
+ if (flags & RADEON_USE_HIERZ) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+ }
+
+ BEGIN_RING(26);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
+ OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
+ OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
+ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
+ tempRB3D_STENCILREFMASK);
+ OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
+ OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
+ OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
+ OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
+ OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
+ OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
+ OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time.
+ */
+ sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+
+ /* Funny that this should be required --
+ * sets top-left?
+ */
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ BEGIN_RING(14);
+ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
+ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+ RADEON_PRIM_WALK_RING |
+ (3 << RADEON_NUM_VERTICES_SHIFT)));
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x3f800000);
+ ADVANCE_RING();
+ }
+ } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+ int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+
+ rb3d_cntl = depth_clear->rb3d_cntl;
+
+ if (flags & RADEON_DEPTH) {
+ rb3d_cntl |= RADEON_Z_ENABLE;
+ } else {
+ rb3d_cntl &= ~RADEON_Z_ENABLE;
+ }
+
+ if (flags & RADEON_STENCIL) {
+ rb3d_cntl |= RADEON_STENCIL_ENABLE;
+ rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
+ } else {
+ rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+ rb3d_stencilrefmask = 0x00000000;
+ }
+
+ if (flags & RADEON_USE_COMP_ZBUF) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+ RADEON_Z_DECOMPRESSION_ENABLE;
+ }
+ if (flags & RADEON_USE_HIERZ) {
+ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+ }
+
+ BEGIN_RING(13);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
+ OUT_RING(0x00000000);
+ OUT_RING(rb3d_cntl);
+
+ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
+ OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
+ OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
+ ADVANCE_RING();
+
+ /* Make sure we restore the 3D state next time.
+ */
+ sarea_priv->ctx_owner = 0;
+
+ for (i = 0; i < nbox; i++) {
+
+ /* Funny that this should be required --
+ * sets top-left?
+ */
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ BEGIN_RING(15);
+
+ OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
+ OUT_RING(RADEON_VTX_Z_PRESENT |
+ RADEON_VTX_PKCOLOR_PRESENT);
+ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+ RADEON_PRIM_WALK_RING |
+ RADEON_MAOS_ENABLE |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (3 << RADEON_NUM_VERTICES_SHIFT)));
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+ OUT_RING(0x0);
+
+ ADVANCE_RING();
+ }
+ }
+
+ /* Increment the clear counter. The client-side 3D driver must
+ * wait on this value before performing the clear ioctl. We
+ * need this because the card's so damned fast...
+ */
+ sarea_priv->last_clear++;
+
+ BEGIN_RING(4);
+
+ RADEON_CLEAR_AGE(sarea_priv->last_clear);
+ RADEON_WAIT_UNTIL_IDLE();
+
+ ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ /* Do some trivial performance monitoring...
+ */
+ if (dev_priv->do_boxes)
+ radeon_cp_performance_boxes(dev_priv, master_priv);
+
+ /* Wait for the 3D stream to idle before dispatching the bitblt.
+ * This will prevent data corruption between the two streams.
+ */
+ BEGIN_RING(2);
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+
+ ADVANCE_RING();
+
+ for (i = 0; i < nbox; i++) {
+ int x = pbox[i].x1;
+ int y = pbox[i].y1;
+ int w = pbox[i].x2 - x;
+ int h = pbox[i].y2 - y;
+
+ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+ BEGIN_RING(9);
+
+ OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (dev_priv->color_fmt << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+
+ /* Make this work even if front & back are flipped:
+ */
+ OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
+ if (sarea_priv->pfCurrentPage == 0) {
+ OUT_RING(dev_priv->back_pitch_offset);
+ OUT_RING(dev_priv->front_pitch_offset);
+ } else {
+ OUT_RING(dev_priv->front_pitch_offset);
+ OUT_RING(dev_priv->back_pitch_offset);
+ }
+
+ OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
+ OUT_RING((x << 16) | y);
+ OUT_RING((x << 16) | y);
+ OUT_RING((w << 16) | h);
+
+ ADVANCE_RING();
+ }
+
+ /* Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+ sarea_priv->last_frame++;
+
+ BEGIN_RING(4);
+
+ RADEON_FRAME_AGE(sarea_priv->last_frame);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+
+ ADVANCE_RING();
+}
+
+void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+ int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
+ ? dev_priv->front_offset : dev_priv->back_offset;
+ RING_LOCALS;
+ DRM_DEBUG("pfCurrentPage=%d\n",
+ master_priv->sarea_priv->pfCurrentPage);
+
+ /* Do some trivial performance monitoring...
+ */
+ if (dev_priv->do_boxes) {
+ dev_priv->stats.boxes |= RADEON_BOX_FLIP;
+ radeon_cp_performance_boxes(dev_priv, master_priv);
+ }
+
+ /* Update the frame offsets for both CRTCs
+ */
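+ /* For the two color formats used here (RGB565 = 4, ARGB8888 = 6),
+ * color_fmt - 2 happens to equal the bytes per pixel, so the
+ * expression below turns frame.x into a byte offset.
+ */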
+ BEGIN_RING(6);
+
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ OUT_RING_REG(RADEON_CRTC_OFFSET,
+ ((sarea->frame.y * dev_priv->front_pitch +
+ sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
+ + offset);
+ OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
+ + offset);
+
+ ADVANCE_RING();
+
+ /* Increment the frame counter. The client-side 3D driver must
+ * throttle the framerate by waiting for this value before
+ * performing the swapbuffer ioctl.
+ */
+ master_priv->sarea_priv->last_frame++;
+ master_priv->sarea_priv->pfCurrentPage =
+ 1 - master_priv->sarea_priv->pfCurrentPage;
+
+ BEGIN_RING(2);
+
+ RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
+
+ ADVANCE_RING();
+}
+
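+/* Returns nonzero when 'nr' vertices cannot form whole primitives of
+ * the given type (lines need an even, nonzero count; triangle lists a
+ * nonzero multiple of three; and so on).
+ */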
+static int bad_prim_vertex_nr(int primitive, int nr)
+{
+ switch (primitive & RADEON_PRIM_TYPE_MASK) {
+ case RADEON_PRIM_TYPE_NONE:
+ case RADEON_PRIM_TYPE_POINT:
+ return nr < 1;
+ case RADEON_PRIM_TYPE_LINE:
+ return (nr & 1) || nr == 0;
+ case RADEON_PRIM_TYPE_LINE_STRIP:
+ return nr < 2;
+ case RADEON_PRIM_TYPE_TRI_LIST:
+ case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
+ case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
+ case RADEON_PRIM_TYPE_RECT_LIST:
+ return nr % 3 || nr == 0;
+ case RADEON_PRIM_TYPE_TRI_FAN:
+ case RADEON_PRIM_TYPE_TRI_STRIP:
+ return nr < 3;
+ default:
+ return 1;
+ }
+}
+
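+/* Primitive description handed down from the vertex/indices ioctls to
+ * the dispatch helpers below.
+ */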
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim;
+ unsigned int numverts;
+ unsigned int offset;
+ unsigned int vc_format;
+} drm_radeon_tcl_prim_t;
+
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+ struct drm_file *file_priv,
+ struct drm_buf * buf,
+ drm_radeon_tcl_prim_t * prim)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
+ int numverts = (int)prim->numverts;
+ int nbox = sarea_priv->nbox;
+ int i = 0;
+ RING_LOCALS;
+
+ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
+ prim->prim,
+ prim->vc_format, prim->start, prim->finish, prim->numverts);
+
+ if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
+ DRM_ERROR("bad prim %x numverts %d\n",
+ prim->prim, prim->numverts);
+ return;
+ }
+
+ do {
+ /* Emit the next cliprect */
+ if (i < nbox) {
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+ }
+
+ /* Emit the vertex buffer rendering commands */
+ BEGIN_RING(5);
+
+ OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
+ OUT_RING(offset);
+ OUT_RING(numverts);
+ OUT_RING(prim->vc_format);
+ OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (numverts << RADEON_NUM_VERTICES_SHIFT));
+
+ ADVANCE_RING();
+
+ i++;
+ } while (i < nbox);
+}
+
+void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ RING_LOCALS;
+
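+ /* The age write below is executed by the CP after everything queued
+ * so far; the freelist compares a buffer's age against the scratch
+ * register it lands in to tell when the buffer can be reused.
+ */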
+ buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+
+ /* Emit the vertex buffer age */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+ BEGIN_RING(3);
+ R600_DISPATCH_AGE(buf_priv->age);
+ ADVANCE_RING();
+ } else {
+ BEGIN_RING(2);
+ RADEON_DISPATCH_AGE(buf_priv->age);
+ ADVANCE_RING();
+ }
+
+ buf->pending = 1;
+ buf->used = 0;
+}
+
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+ struct drm_buf * buf, int start, int end)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+ DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+ if (start != end) {
+ int offset = (dev_priv->gart_buffers_offset
+ + buf->offset + start);
+ int dwords = (end - start + 3) / sizeof(u32);
+
+ /* Indirect buffer data must be an even number of
+ * dwords, so if we've been given an odd number we must
+ * pad the data with a Type-2 CP packet.
+ */
+ if (dwords & 1) {
+ u32 *data = (u32 *)
+ ((char *)dev->agp_buffer_map->handle
+ + buf->offset + start);
+ data[dwords++] = RADEON_CP_PACKET2;
+ }
+
+ /* Fire off the indirect buffer */
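+ /* A type-0 packet with count 1 writes two consecutive registers,
+ * RADEON_CP_IB_BASE and RADEON_CP_IB_BUFSZ, which starts the fetch.
+ */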
+ BEGIN_RING(3);
+
+ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+ OUT_RING(offset);
+ OUT_RING(dwords);
+
+ ADVANCE_RING();
+ }
+}
+
+static void radeon_cp_dispatch_indices(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_buf * elt_buf,
+ drm_radeon_tcl_prim_t * prim)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ int offset = dev_priv->gart_buffers_offset + prim->offset;
+ u32 *data;
+ int dwords;
+ int i = 0;
+ int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+ int count = (prim->finish - start) / sizeof(u16);
+ int nbox = sarea_priv->nbox;
+
+ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
+ prim->prim,
+ prim->vc_format,
+ prim->start, prim->finish, prim->offset, prim->numverts);
+
+ if (bad_prim_vertex_nr(prim->prim, count)) {
+ DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
+ return;
+ }
+
+ if (start >= prim->finish || (prim->start & 0x7)) {
+ DRM_ERROR("bad prim %d: invalid start/finish\n", prim->prim);
+ return;
+ }
+
+ dwords = (prim->finish - prim->start + 3) / sizeof(u32);
+
+ data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ elt_buf->offset + prim->start);
+
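+ /* Build a RNDR_GEN_INDX_PRIM draw packet in place over the first
+ * five dwords of the element buffer; the indices sit right after
+ * it, at prim->start + RADEON_INDEX_PRIM_OFFSET, so the whole range
+ * can be fired as an indirect buffer below.
+ */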
+ data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
+ data[1] = offset;
+ data[2] = prim->numverts;
+ data[3] = prim->vc_format;
+ data[4] = (prim->prim |
+ RADEON_PRIM_WALK_IND |
+ RADEON_COLOR_ORDER_RGBA |
+ RADEON_VTX_FMT_RADEON_MODE |
+ (count << RADEON_NUM_VERTICES_SHIFT));
+
+ do {
+ if (i < nbox)
+ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+ radeon_cp_dispatch_indirect(dev, elt_buf,
+ prim->start, prim->finish);
+
+ i++;
+ } while (i < nbox);
+}
+
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
+
+static int radeon_cp_dispatch_texture(struct drm_device * dev,
+ struct drm_file *file_priv,
+ drm_radeon_texture_t * tex,
+ drm_radeon_tex_image_t * image)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_buf *buf;
+ u32 format;
+ u32 *buffer;
+ const u8 __user *data;
+ int size, dwords, tex_width, blit_width, spitch;
+ u32 height;
+ int i;
+ u32 texpitch, microtile;
+ u32 offset, byte_offset;
+ RING_LOCALS;
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
+ DRM_ERROR("Invalid destination offset\n");
+ return -EINVAL;
+ }
+
+ dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
+
+ /* Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
+ */
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+
+ /* The compiler won't optimize away a division by a variable,
+ * even if the only legal values are powers of two. Thus, we'll
+ * use a shift instead.
+ */
+ switch (tex->format) {
+ case RADEON_TXFORMAT_ARGB8888:
+ case RADEON_TXFORMAT_RGBA8888:
+ format = RADEON_COLOR_FORMAT_ARGB8888;
+ tex_width = tex->width * 4;
+ blit_width = image->width * 4;
+ break;
+ case RADEON_TXFORMAT_AI88:
+ case RADEON_TXFORMAT_ARGB1555:
+ case RADEON_TXFORMAT_RGB565:
+ case RADEON_TXFORMAT_ARGB4444:
+ case RADEON_TXFORMAT_VYUY422:
+ case RADEON_TXFORMAT_YVYU422:
+ format = RADEON_COLOR_FORMAT_RGB565;
+ tex_width = tex->width * 2;
+ blit_width = image->width * 2;
+ break;
+ case RADEON_TXFORMAT_I8:
+ case RADEON_TXFORMAT_RGB332:
+ format = RADEON_COLOR_FORMAT_CI8;
+ tex_width = tex->width * 1;
+ blit_width = image->width * 1;
+ break;
+ default:
+ DRM_ERROR("invalid texture format %d\n", tex->format);
+ return -EINVAL;
+ }
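+ /* The blit source pitch field is specified in 64-byte units. */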
+ spitch = blit_width >> 6;
+ if (spitch == 0 && image->height > 1)
+ return -EINVAL;
+
+ texpitch = tex->pitch;
+ if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
+ microtile = 1;
+ if (tex_width < 64) {
+ texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
+ /* we got tiled coordinates, untile them */
+ image->x *= 2;
+ }
+ } else
+ microtile = 0;
+
+ /* this might fail for zero-sized uploads - are those illegal? */
+ if (!radeon_check_offset(dev_priv, tex->offset + image->height *
+ blit_width - 1)) {
+ DRM_ERROR("Invalid final destination offset\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
+
+ do {
+ DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ tex->offset >> 10, tex->pitch, tex->format,
+ image->x, image->y, image->width, image->height);
+
+ /* Make a copy of some parameters in case we have to
+ * update them for a multi-pass texture blit.
+ */
+ height = image->height;
+ data = (const u8 __user *)image->data;
+
+ size = height * blit_width;
+
+ if (size > RADEON_MAX_TEXTURE_SIZE) {
+ height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ size = height * blit_width;
+ } else if (size < 4 && size > 0) {
+ size = 4;
+ } else if (size == 0) {
+ return 0;
+ }
+
+ buf = radeon_freelist_get(dev);
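+ /* This retry path is compiled out (0 && ...); a failed allocation
+ * falls through to the EAGAIN handling below instead of idling
+ * the CP.
+ */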
+ if (0 && !buf) {
+ radeon_do_cp_idle(dev_priv);
+ buf = radeon_freelist_get(dev);
+ }
+ if (!buf) {
+ DRM_DEBUG("EAGAIN\n");
+ if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+ return -EFAULT;
+ return -EAGAIN;
+ }
+
+ /* Dispatch the indirect buffer.
+ */
+ buffer =
+ (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+ dwords = size / 4;
+
+#define RADEON_COPY_MT(_buf, _data, _width) \
+ do { \
+ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+ return -EFAULT; \
+ } \
+ } while(0)
+
+ if (microtile) {
+ /* Texture micro tiling is in use; the minimum texture width is thus
+ 16 bytes. However, we cannot use the blitter directly for texture
+ widths < 64 bytes, since the minimum texture pitch is 64 bytes and
+ it needs to match the texture width, otherwise the blitter will
+ tile it wrong. Thus, tile manually in this case. Additionally, we
+ need to special-case a texture height of 1, since the actual image
+ will have height 2 and we must not read beyond the texture size
+ from user space. */
+ if (tex->height == 1) {
+ if (tex_width >= 64 || tex_width <= 16) {
+ RADEON_COPY_MT(buffer, data,
+ (int)(tex_width * sizeof(u32)));
+ } else if (tex_width == 32) {
+ RADEON_COPY_MT(buffer, data, 16);
+ RADEON_COPY_MT(buffer + 8,
+ data + 16, 16);
+ }
+ } else if (tex_width >= 64 || tex_width == 16) {
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof(u32)));
+ } else if (tex_width < 16) {
+ for (i = 0; i < tex->height; i++) {
+ RADEON_COPY_MT(buffer, data, tex_width);
+ buffer += 4;
+ data += tex_width;
+ }
+ } else if (tex_width == 32) {
+ /* TODO: make sure this works when not fitting in one buffer
+ (i.e. 32bytes x 2048...) */
+ for (i = 0; i < tex->height; i += 2) {
+ RADEON_COPY_MT(buffer, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 8, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 4, data, 16);
+ data += 16;
+ RADEON_COPY_MT(buffer + 12, data, 16);
+ data += 16;
+ buffer += 16;
+ }
+ }
+ } else {
+ if (tex_width >= 32) {
+ /* Texture image width is larger than the minimum, so we
+ * can upload it directly.
+ */
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof(u32)));
+ } else {
+ /* Texture image width is less than the minimum, so we
+ * need to pad out each image scanline to the minimum
+ * width.
+ */
+ for (i = 0; i < tex->height; i++) {
+ RADEON_COPY_MT(buffer, data, tex_width);
+ buffer += 8;
+ data += tex_width;
+ }
+ }
+ }
+
+#undef RADEON_COPY_MT
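+ /* Keep the destination Y below 2048 and fold whole 2048-line bands
+ * into the byte offset instead; the blit Y field apparently cannot
+ * hold larger values.
+ */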
+ byte_offset = (image->y & ~2047) * blit_width;
+ buf->file_priv = file_priv;
+ buf->used = size;
+ offset = dev_priv->gart_buffers_offset + buf->offset;
+ BEGIN_RING(9);
+ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+ OUT_RING((spitch << 22) | (offset >> 10));
+ OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
+ OUT_RING(0);
+ OUT_RING((image->x << 16) | (image->y % 2048));
+ OUT_RING((image->width << 16) | height);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+ /* Update the input parameters for next time */
+ image->y += height;
+ image->height -= height;
+ image->data = (const u8 __user *)image->data + size;
+ } while (image->height > 0);
+
+ /* Flush the pixel cache after the blit completes. This ensures
+ * the texture data is written out to memory before rendering
+ * continues.
+ */
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return 0;
+}
+
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(35);
+
+ OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
+ OUT_RING(0x00000000);
+
+ OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
+ for (i = 0; i < 32; i++) {
+ OUT_RING(stipple[i]);
+ }
+
+ ADVANCE_RING();
+}
+
+static void radeon_apply_surface_regs(int surf_index,
+ drm_radeon_private_t *dev_priv)
+{
+ if (!dev_priv->mmio)
+ return;
+
+ radeon_do_cp_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
+ dev_priv->surfaces[surf_index].flags);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
+ dev_priv->surfaces[surf_index].lower);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
+ dev_priv->surfaces[surf_index].upper);
+}
+
+/* Allocates a virtual surface
+ * doesn't always allocate a real surface, will stretch an existing
+ * surface when possible.
+ *
+ * Note that refcount can be at most 2, since during a free refcount=3
+ * might mean we have to allocate a new surface which might not always
+ * be available.
+ * For example : we allocate three contiguous surfaces ABC. If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+ drm_radeon_private_t *dev_priv,
+ struct drm_file *file_priv)
+{
+ struct radeon_virt_surface *s;
+ int i;
+ int virt_surface_index;
+ uint32_t new_upper, new_lower;
+
+ new_lower = new->address;
+ new_upper = new_lower + new->size - 1;
+
+ /* sanity check */
+ if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+ ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
+ RADEON_SURF_ADDRESS_FIXED_MASK)
+ || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+ return -1;
+
+ /* make sure there is no overlap with existing surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if ((dev_priv->surfaces[i].refcount != 0) &&
+ (((new_lower >= dev_priv->surfaces[i].lower) &&
+ (new_lower < dev_priv->surfaces[i].upper)) ||
+ ((new_lower < dev_priv->surfaces[i].lower) &&
+ (new_upper > dev_priv->surfaces[i].lower)))) {
+ return -1;
+ }
+ }
+
+ /* find a virtual surface */
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
+ if (dev_priv->virt_surfaces[i].file_priv == NULL)
+ break;
+ if (i == 2 * RADEON_MAX_SURFACES) {
+ return -1;
+ }
+ virt_surface_index = i;
+
+ /* try to reuse an existing surface */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ /* extend before */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->file_priv = file_priv;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].lower = s->lower;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+
+ /* extend after */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_lower == dev_priv->surfaces[i].upper + 1)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->file_priv = file_priv;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].upper = s->upper;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* okay, we need a new one */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->surfaces[i].refcount == 0) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->file_priv = file_priv;
+ dev_priv->surfaces[i].refcount = 1;
+ dev_priv->surfaces[i].lower = s->lower;
+ dev_priv->surfaces[i].upper = s->upper;
+ dev_priv->surfaces[i].flags = s->flags;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* we didn't find anything */
+ return -1;
+}
+
+static int free_surface(struct drm_file *file_priv,
+ drm_radeon_private_t * dev_priv,
+ int lower)
+{
+ struct radeon_virt_surface *s;
+ int i;
+ /* find the virtual surface */
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+ s = &(dev_priv->virt_surfaces[i]);
+ if (s->file_priv) {
+ if ((lower == s->lower) && (file_priv == s->file_priv))
+ {
+ if (dev_priv->surfaces[s->surface_index].
+ lower == s->lower)
+ dev_priv->surfaces[s->surface_index].
+ lower = s->upper;
+
+ if (dev_priv->surfaces[s->surface_index].
+ upper == s->upper)
+ dev_priv->surfaces[s->surface_index].
+ upper = s->lower;
+
+ dev_priv->surfaces[s->surface_index].refcount--;
+ if (dev_priv->surfaces[s->surface_index].
+ refcount == 0)
+ dev_priv->surfaces[s->surface_index].
+ flags = 0;
+ s->file_priv = NULL;
+ radeon_apply_surface_regs(s->surface_index,
+ dev_priv);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static void radeon_surfaces_release(struct drm_file *file_priv,
+ drm_radeon_private_t * dev_priv)
+{
+ int i;
+ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->virt_surfaces[i].file_priv == file_priv)
+ free_surface(file_priv, dev_priv,
+ dev_priv->virt_surfaces[i].lower);
+ }
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_alloc_t *alloc = data;
+
+ if (alloc_surface(alloc, dev_priv, file_priv) == -1)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_free_t *memfree = data;
+
+ if (free_surface(file_priv, dev_priv, memfree->address))
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+ drm_radeon_clear_t *clear = data;
+ drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ sarea_priv->nbox * sizeof(depth_boxes[0])))
+ return -EFAULT;
+
+ radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
+
+ COMMIT_RING();
+ return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ RING_LOCALS;
+
+ DRM_DEBUG("\n");
+
+ BEGIN_RING(6);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
+ OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
+ RADEON_CRTC_OFFSET_FLIP_CNTL);
+ OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
+ OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
+ RADEON_CRTC_OFFSET_FLIP_CNTL);
+ ADVANCE_RING();
+
+ dev_priv->page_flipping = 1;
+
+ if (master_priv->sarea_priv->pfCurrentPage != 1)
+ master_priv->sarea_priv->pfCurrentPage = 0;
+
+ return 0;
+}
+
+/* Swapping and flipping are different operations, need different ioctls.
+ * They can & should be intermixed to support multiple 3d windows.
+ */
+static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (!dev_priv->page_flipping)
+ radeon_do_init_pageflip(dev, file_priv->master);
+
+ radeon_cp_dispatch_flip(dev, file_priv->master);
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+
+ DRM_DEBUG("\n");
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_cp_dispatch_swap(dev, file_priv);
+ else
+ radeon_cp_dispatch_swap(dev, file_priv->master);
+ sarea_priv->ctx_owner = 0;
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_vertex_t *vertex = data;
+ drm_radeon_tcl_prim_t prim;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ sarea_priv = master_priv->sarea_priv;
+
+ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ vertex->idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", vertex->prim);
+ return -EINVAL;
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[vertex->idx];
+
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+ return -EINVAL;
+ }
+
+ /* Build up a prim_t record:
+ */
+ if (vertex->count) {
+ buf->used = vertex->count; /* not used? */
+
+ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+ if (radeon_emit_state(dev_priv, file_priv,
+ &sarea_priv->context_state,
+ sarea_priv->tex_state,
+ sarea_priv->dirty)) {
+ DRM_ERROR("radeon_emit_state failed\n");
+ return -EINVAL;
+ }
+
+ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+ RADEON_UPLOAD_TEX1IMAGES |
+ RADEON_UPLOAD_TEX2IMAGES |
+ RADEON_REQUIRE_QUIESCENCE);
+ }
+
+ prim.start = 0;
+ prim.finish = vertex->count; /* unused */
+ prim.prim = vertex->prim;
+ prim.numverts = vertex->count;
+ prim.vc_format = sarea_priv->vc_format;
+
+ radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
+ }
+
+ if (vertex->discard) {
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+ }
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_indices_t *elts = data;
+ drm_radeon_tcl_prim_t prim;
+ int count;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ sarea_priv = master_priv->sarea_priv;
+
+ DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
+ DRM_CURRENTPID, elts->idx, elts->start, elts->end,
+ elts->discard);
+
+ if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ elts->idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+ if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", elts->prim);
+ return -EINVAL;
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[elts->idx];
+
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", elts->idx);
+ return -EINVAL;
+ }
+
+ count = (elts->end - elts->start) / sizeof(u16);
+ elts->start -= RADEON_INDEX_PRIM_OFFSET;
+
+ if (elts->start & 0x7) {
+ DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+ return -EINVAL;
+ }
+ if (elts->start < buf->used) {
+ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+ return -EINVAL;
+ }
+
+ buf->used = elts->end;
+
+ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+ if (radeon_emit_state(dev_priv, file_priv,
+ &sarea_priv->context_state,
+ sarea_priv->tex_state,
+ sarea_priv->dirty)) {
+ DRM_ERROR("radeon_emit_state failed\n");
+ return -EINVAL;
+ }
+
+ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+ RADEON_UPLOAD_TEX1IMAGES |
+ RADEON_UPLOAD_TEX2IMAGES |
+ RADEON_REQUIRE_QUIESCENCE);
+ }
+
+ /* Build up a prim_t record:
+ */
+ prim.start = elts->start;
+ prim.finish = elts->end;
+ prim.prim = elts->prim;
+ prim.offset = 0; /* offset from start of dma buffers */
+ prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
+ prim.vc_format = sarea_priv->vc_format;
+
+ radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
+ if (elts->discard) {
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+ }
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_texture_t *tex = data;
+ drm_radeon_tex_image_t image;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (tex->image == NULL) {
+ DRM_ERROR("null texture image!\n");
+ return -EINVAL;
+ }
+
+ if (DRM_COPY_FROM_USER(&image,
+ (drm_radeon_tex_image_t __user *) tex->image,
+ sizeof(image)))
+ return -EFAULT;
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
+ else
+ ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
+
+ return ret;
+}
+
+static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_stipple_t *stipple = data;
+ u32 mask[32];
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ return -EFAULT;
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+ radeon_cp_dispatch_stipple(dev, mask);
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_indirect_t *indirect = data;
+ RING_LOCALS;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+ indirect->idx, indirect->start, indirect->end,
+ indirect->discard);
+
+ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ indirect->idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+
+ buf = dma->buflist[indirect->idx];
+
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
+ }
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+ return -EINVAL;
+ }
+
+ if (indirect->start < buf->used) {
+ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+ indirect->start, buf->used);
+ return -EINVAL;
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf->used = indirect->end;
+
+ /* Dispatch the indirect buffer full of commands from the
+ * X server. This is insecure and is thus only available to
+ * privileged clients.
+ */
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ r600_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+ else {
+ /* Wait for the 3D stream to idle before the indirect buffer
+ * containing 2D acceleration commands is processed.
+ */
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+ }
+
+ if (indirect->discard) {
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+ }
+
+ COMMIT_RING();
+ return 0;
+}
+
+static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_vertex2_t *vertex = data;
+ int i;
+ unsigned char laststate;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ sarea_priv = master_priv->sarea_priv;
+
+ DRM_DEBUG("pid=%d index=%d discard=%d\n",
+ DRM_CURRENTPID, vertex->idx, vertex->discard);
+
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ vertex->idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ buf = dma->buflist[vertex->idx];
+
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("process %d using buffer owned by %p\n",
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
+ }
+
+ if (buf->pending) {
+ DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+ return -EINVAL;
+ }
+
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ return -EINVAL;
+
+ for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
+ drm_radeon_prim_t prim;
+ drm_radeon_tcl_prim_t tclprim;
+
+ if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+ return -EFAULT;
+
+ if (prim.stateidx != laststate) {
+ drm_radeon_state_t state;
+
+ if (DRM_COPY_FROM_USER(&state,
+ &vertex->state[prim.stateidx],
+ sizeof(state)))
+ return -EFAULT;
+
+ if (radeon_emit_state2(dev_priv, file_priv, &state)) {
+ DRM_ERROR("radeon_emit_state2 failed\n");
+ return -EINVAL;
+ }
+
+ laststate = prim.stateidx;
+ }
+
+ tclprim.start = prim.start;
+ tclprim.finish = prim.finish;
+ tclprim.prim = prim.prim;
+ tclprim.vc_format = prim.vc_format;
+
+ if (prim.prim & RADEON_PRIM_WALK_IND) {
+ tclprim.offset = prim.numverts * 64;
+ tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
+
+ radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
+ } else {
+ tclprim.numverts = prim.numverts;
+ tclprim.offset = 0; /* not used */
+
+ radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
+ }
+
+ if (sarea_priv->nbox == 1)
+ sarea_priv->nbox = 0;
+ }
+
+ if (vertex->discard) {
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+ }
+
+ COMMIT_RING();
+ return 0;
+}
+
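+/* Emit a state packet from the command stream. A CP type-0 packet
+ * header written as CP_PACKET0(reg, n) is followed by n + 1 payload
+ * dwords stored to consecutive registers starting at reg, so the
+ * BEGIN_RING(sz + 1) below covers one header plus sz data words.
+ */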
+static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
+ struct drm_file *file_priv,
+ drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int id = (int)header.packet.packet_id;
+ int sz, reg;
+ RING_LOCALS;
+
+ if (id >= RADEON_MAX_STATE_PACKETS)
+ return -EINVAL;
+
+ sz = packet[id].len;
+ reg = packet[id].start;
+
+ if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
+ DRM_ERROR("Packet size provided larger than data provided\n");
+ return -EINVAL;
+ }
+
+ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+ cmdbuf->buffer)) {
+ DRM_ERROR("Packet verification failed\n");
+ return -EINVAL;
+ }
+
+ BEGIN_RING(sz + 1);
+ OUT_RING(CP_PACKET0(reg, (sz - 1)));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.scalars.count;
+ int start = header.scalars.offset;
+ int stride = header.scalars.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(3 + sz);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+ return 0;
+}
+
+/* God this is ugly
+ */
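+/* Identical to radeon_emit_scalars() except that the start index is
+ * biased by 0x100: the 8-bit offset field in the command header cannot
+ * address the upper scalar range directly, hence (presumably) the
+ * second entry point.
+ */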
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.scalars.count;
+ int start = ((unsigned int)header.scalars.offset) + 0x100;
+ int stride = header.scalars.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(3 + sz);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+ return 0;
+}
+
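+/* Upload TCL vector state: after a state flush, the vector index
+ * register is loaded with the start/stride and sz dwords are streamed
+ * into the auto-incrementing VECTOR_DATA port.
+ */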
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.vectors.count;
+ int start = header.vectors.offset;
+ int stride = header.vectors.stride;
+ RING_LOCALS;
+
+ BEGIN_RING(5 + sz);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+ OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
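+/* Like radeon_emit_vectors(), but for a linear range: the destination
+ * index comes from the split addr_lo/addr_hi header fields, and count
+ * is in 4-dword vectors, hence sz = count * 4 payload dwords.
+ */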
+static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
+ drm_radeon_cmd_header_t header,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ int sz = header.veclinear.count * 4;
+ int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
+ RING_LOCALS;
+
+ if (!sz)
+ return 0;
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+ return -EINVAL;
+
+ BEGIN_RING(5 + sz);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+ OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static int radeon_emit_packet3(struct drm_device * dev,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ unsigned int cmdsz;
+ int ret;
+ RING_LOCALS;
+
+ DRM_DEBUG("\n");
+
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+ cmdbuf, &cmdsz))) {
+ DRM_ERROR("Packet verification failed\n");
+ return ret;
+ }
+
+ BEGIN_RING(cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+ ADVANCE_RING();
+
+ return 0;
+}
+
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+ struct drm_file *file_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ int orig_nbox)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_clip_rect box;
+ unsigned int cmdsz;
+ int ret;
+ struct drm_clip_rect __user *boxes = cmdbuf->boxes;
+ int i = 0;
+ RING_LOCALS;
+
+ DRM_DEBUG("\n");
+
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+ cmdbuf, &cmdsz))) {
+ DRM_ERROR("Packet verification failed\n");
+ return ret;
+ }
+
+ if (!orig_nbox)
+ goto out;
+
+ do {
+ if (i < cmdbuf->nbox) {
+ if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+ return -EFAULT;
+ /* FIXME The second and subsequent times round
+ * this loop, send a WAIT_UNTIL_3D_IDLE before
+ * calling emit_clip_rect(). This fixes a
+ * lockup on fast machines when sending
+ * several cliprects with a cmdbuf, as when
+ * waving a 2D window over a 3D
+ * window. Something in the commands from user
+ * space seems to hang the card when they're
+ * sent several times in a row. That would be
+ * the correct place to fix it but this works
+ * around it until I can figure that out - Tim
+ * Smith */
+ if (i) {
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ }
+ radeon_emit_clip_rect(dev_priv, &box);
+ }
+
+ BEGIN_RING(cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+ ADVANCE_RING();
+
+ } while (++i < cmdbuf->nbox);
+ if (cmdbuf->nbox == 1)
+ cmdbuf->nbox = 0;
+
+ return 0;
+ out:
+ drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
+ return 0;
+}
+
+static int radeon_emit_wait(struct drm_device * dev, int flags)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ DRM_DEBUG("%x\n", flags);
+ switch (flags) {
+ case RADEON_WAIT_2D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_3D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_2D | RADEON_WAIT_3D:
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
+ drm_radeon_cmd_header_t stack_header;
+ int idx;
+ drm_radeon_kcmd_buffer_t *cmdbuf = data;
+ int orig_nbox;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ RING_SPACE_TEST_WITH_RETURN(dev_priv);
+ VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+ if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
+ return -EINVAL;
+ }
+
+ /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
+ * races between checking values and using those values in other code,
+ * and simply to avoid a lot of function calls to copy in data.
+ */
+ if (cmdbuf->bufsz != 0) {
+ int rv;
+ void __user *buffer = cmdbuf->buffer;
+ rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+ if (rv)
+ return rv;
+ rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+ cmdbuf->bufsz);
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
+ return rv;
+ }
+ } else
+ goto done;
+
+ orig_nbox = cmdbuf->nbox;
+
+ if (dev_priv->microcode_version == UCODE_R300) {
+ int temp;
+ temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
+
+ drm_buffer_free(cmdbuf->buffer);
+
+ return temp;
+ }
+
+ /* microcode_version != r300 */
+ while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
+
+ drm_radeon_cmd_header_t *header;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
+
+ switch (header->header.cmd_type) {
+ case RADEON_CMD_PACKET:
+ DRM_DEBUG("RADEON_CMD_PACKET\n");
+ if (radeon_emit_packets
+ (dev_priv, file_priv, *header, cmdbuf)) {
+ DRM_ERROR("radeon_emit_packets failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_SCALARS:
+ DRM_DEBUG("RADEON_CMD_SCALARS\n");
+ if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
+ DRM_ERROR("radeon_emit_scalars failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_VECTORS:
+ DRM_DEBUG("RADEON_CMD_VECTORS\n");
+ if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
+ DRM_ERROR("radeon_emit_vectors failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_DMA_DISCARD:
+ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+ idx = header->dma.buf_idx;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("buffer index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ goto err;
+ }
+
+ buf = dma->buflist[idx];
+ if (buf->file_priv != file_priv || buf->pending) {
+ DRM_ERROR("bad buffer %p %p %d\n",
+ buf->file_priv, file_priv,
+ buf->pending);
+ goto err;
+ }
+
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
+ break;
+
+ case RADEON_CMD_PACKET3:
+ DRM_DEBUG("RADEON_CMD_PACKET3\n");
+ if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
+ DRM_ERROR("radeon_emit_packet3 failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_PACKET3_CLIP:
+ DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
+ if (radeon_emit_packet3_cliprect
+ (dev, file_priv, cmdbuf, orig_nbox)) {
+ DRM_ERROR("radeon_emit_packet3_clip failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_SCALARS2:
+ DRM_DEBUG("RADEON_CMD_SCALARS2\n");
+ if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
+ DRM_ERROR("radeon_emit_scalars2 failed\n");
+ goto err;
+ }
+ break;
+
+ case RADEON_CMD_WAIT:
+ DRM_DEBUG("RADEON_CMD_WAIT\n");
+ if (radeon_emit_wait(dev, header->wait.flags)) {
+ DRM_ERROR("radeon_emit_wait failed\n");
+ goto err;
+ }
+ break;
+ case RADEON_CMD_VECLINEAR:
+ DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
+ if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
+ DRM_ERROR("radeon_emit_veclinear failed\n");
+ goto err;
+ }
+ break;
+
+ default:
+ DRM_ERROR("bad cmd_type %d at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator);
+ goto err;
+ }
+ }
+
+ drm_buffer_free(cmdbuf->buffer);
+
+ done:
+ DRM_DEBUG("DONE\n");
+ COMMIT_RING();
+ return 0;
+
+ err:
+ drm_buffer_free(cmdbuf->buffer);
+ return -EINVAL;
+}
+
+static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t *param = data;
+ int value;
+
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+ switch (param->param) {
+ case RADEON_PARAM_GART_BUFFER_OFFSET:
+ value = dev_priv->gart_buffers_offset;
+ break;
+ case RADEON_PARAM_LAST_FRAME:
+ dev_priv->stats.last_frame_reads++;
+ value = GET_SCRATCH(dev_priv, 0);
+ break;
+ case RADEON_PARAM_LAST_DISPATCH:
+ value = GET_SCRATCH(dev_priv, 1);
+ break;
+ case RADEON_PARAM_LAST_CLEAR:
+ dev_priv->stats.last_clear_reads++;
+ value = GET_SCRATCH(dev_priv, 2);
+ break;
+ case RADEON_PARAM_IRQ_NR:
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ value = 0;
+ else
+ value = drm_dev_to_irq(dev);
+ break;
+ case RADEON_PARAM_GART_BASE:
+ value = dev_priv->gart_vm_start;
+ break;
+ case RADEON_PARAM_REGISTER_HANDLE:
+ value = dev_priv->mmio->offset;
+ break;
+ case RADEON_PARAM_STATUS_HANDLE:
+ value = dev_priv->ring_rptr_offset;
+ break;
+#if BITS_PER_LONG == 32
+ /*
+ * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
+ * pointer which can't fit into an int-sized variable. According to
+ * Michel Dänzer, the ioctl() is only used on embedded platforms, so
+ * not supporting it shouldn't be a problem. If the same functionality
+ * is needed on 64-bit platforms, a new ioctl() would have to be added,
+ * so backwards-compatibility for the embedded platforms can be
+ * maintained. --davidm 4-Feb-2004.
+ */
+ case RADEON_PARAM_SAREA_HANDLE:
+ /* The lock is the first dword in the sarea. */
+ /* no users of this parameter */
+ break;
+#endif
+ case RADEON_PARAM_GART_TEX_HANDLE:
+ value = dev_priv->gart_textures_offset;
+ break;
+ case RADEON_PARAM_SCRATCH_OFFSET:
+ if (!dev_priv->writeback_works)
+ return -EINVAL;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ value = R600_SCRATCH_REG_OFFSET;
+ else
+ value = RADEON_SCRATCH_REG_OFFSET;
+ break;
+ case RADEON_PARAM_CARD_TYPE:
+ if (dev_priv->flags & RADEON_IS_PCIE)
+ value = RADEON_CARD_PCIE;
+ else if (dev_priv->flags & RADEON_IS_AGP)
+ value = RADEON_CARD_AGP;
+ else
+ value = RADEON_CARD_PCI;
+ break;
+ case RADEON_PARAM_VBLANK_CRTC:
+ value = radeon_vblank_crtc_get(dev);
+ break;
+ case RADEON_PARAM_FB_LOCATION:
+ value = radeon_read_fb_location(dev_priv);
+ break;
+ case RADEON_PARAM_NUM_GB_PIPES:
+ value = dev_priv->num_gb_pipes;
+ break;
+ case RADEON_PARAM_NUM_Z_PIPES:
+ value = dev_priv->num_z_pipes;
+ break;
+ default:
+ DRM_DEBUG("Invalid parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_setparam_t *sp = data;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ switch (sp->param) {
+ case RADEON_SETPARAM_FB_LOCATION:
+ radeon_priv = file_priv->driver_priv;
+ radeon_priv->radeon_fb_delta = dev_priv->fb_location -
+ sp->value;
+ break;
+ case RADEON_SETPARAM_SWITCH_TILING:
+ if (sp->value == 0) {
+ DRM_DEBUG("color tiling disabled\n");
+ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->tiling_enabled = 0;
+ } else if (sp->value == 1) {
+ DRM_DEBUG("color tiling enabled\n");
+ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->tiling_enabled = 1;
+ }
+ break;
+ case RADEON_SETPARAM_PCIGART_LOCATION:
+ dev_priv->pcigart_offset = sp->value;
+ dev_priv->pcigart_offset_set = 1;
+ break;
+ case RADEON_SETPARAM_NEW_MEMMAP:
+ dev_priv->new_memmap = sp->value;
+ break;
+ case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
+ dev_priv->gart_info.table_size = sp->value;
+ if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
+ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+ break;
+ case RADEON_SETPARAM_VBLANK_CRTC:
+ return radeon_vblank_crtc_set(dev, sp->value);
+ default:
+ DRM_DEBUG("Invalid parameter %d\n", sp->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* When a client dies:
+ * - Check for and clean up flipped page state
+ * - Free any allocated GART memory.
+ * - Free any allocated radeon surfaces.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
+ */
+void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ if (dev->dev_private) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ dev_priv->page_flipping = 0;
+ radeon_mem_release(file_priv, dev_priv->gart_heap);
+ radeon_mem_release(file_priv, dev_priv->fb_heap);
+ radeon_surfaces_release(file_priv, dev_priv);
+ }
+}
+
+void radeon_driver_lastclose(struct drm_device *dev)
+{
+ radeon_surfaces_release(PCIGART_FILE_PRIV, dev->dev_private);
+ radeon_do_release(dev);
+}
+
+int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_driver_file_fields *radeon_priv;
+
+ DRM_DEBUG("\n");
+ radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);
+
+ if (!radeon_priv)
+ return -ENOMEM;
+
+ file_priv->driver_priv = radeon_priv;
+
+ if (dev_priv)
+ radeon_priv->radeon_fb_delta = dev_priv->fb_location;
+ else
+ radeon_priv->radeon_fb_delta = 0;
+ return 0;
+}
+
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_radeon_driver_file_fields *radeon_priv =
+ file_priv->driver_priv;
+
+ kfree(radeon_priv);
+}
+
+struct drm_ioctl_desc radeon_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+};
+
+int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
new file mode 100644
index 0000000..f9ebf2b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Michel Dänzer
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA 0
+
+
+/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
+{
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
+ struct radeon_fence *fence = NULL;
+ uint64_t gtt_addr, vram_addr;
+ unsigned n, size;
+ int i, r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+ ring = radeon_copy_dma_ring_index(rdev);
+ break;
+ case RADEON_TEST_COPY_BLIT:
+ ring = radeon_copy_blit_ring_index(rdev);
+ break;
+ default:
+ DRM_ERROR("Unknown copy method\n");
+ return;
+ }
+
+ size = 1024 * 1024;
+
+ /* Number of tests =
+ * (Total GTT - IB pool - writeback page - ring buffers) / test size
+ */
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->ring[i].ring_size;
+ if (rdev->wb.wb_obj)
+ n -= RADEON_GPU_PAGE_SIZE;
+ if (rdev->ih.ring_obj)
+ n -= rdev->ih.ring_size;
+ n /= size;
+
+ gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+ if (!gtt_obj) {
+ DRM_ERROR("Failed to allocate %d pointers\n", n);
+ r = 1;
+ goto out_cleanup;
+ }
+
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ NULL, &vram_obj);
+ if (r) {
+ DRM_ERROR("Failed to create VRAM object\n");
+ goto out_cleanup;
+ }
+ r = radeon_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ if (r) {
+ DRM_ERROR("Failed to pin VRAM object\n");
+ goto out_cleanup;
+ }
+ for (i = 0; i < n; i++) {
+ void *gtt_map, *vram_map;
+ void **gtt_start, **gtt_end;
+ void **vram_start, **vram_end;
+
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+ if (r) {
+ DRM_ERROR("Failed to create GTT object %d\n", i);
+ goto out_cleanup;
+ }
+
+ r = radeon_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ if (r) {
+ DRM_ERROR("Failed to pin GTT object %d\n", i);
+ goto out_cleanup;
+ }
+
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+ if (r) {
+ DRM_ERROR("Failed to map GTT object %d\n", i);
+ goto out_cleanup;
+ }
+
+ for (gtt_start = gtt_map, gtt_end = gtt_map + size;
+ gtt_start < gtt_end;
+ gtt_start++)
+ *gtt_start = gtt_start;
+
+ radeon_bo_kunmap(gtt_obj[i]);
+
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (r) {
+ DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+ goto out_cleanup;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
+ goto out_cleanup;
+ }
+
+ radeon_fence_unref(&fence);
+
+ r = radeon_bo_kmap(vram_obj, &vram_map);
+ if (r) {
+ DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+ goto out_cleanup;
+ }
+
+ for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ vram_start = vram_map, vram_end = vram_map + size;
+ vram_start < vram_end;
+ gtt_start++, vram_start++) {
+ if (*vram_start != gtt_start) {
+ DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
+ "expected 0x%p (GTT/VRAM offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *vram_start, gtt_start,
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)gtt_start - gtt_map),
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)gtt_start - gtt_map));
+ radeon_bo_kunmap(vram_obj);
+ goto out_cleanup;
+ }
+ *vram_start = vram_start;
+ }
+
+ radeon_bo_kunmap(vram_obj);
+
+ if (ring == R600_RING_TYPE_DMA_INDEX)
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ else
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (r) {
+ DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+ goto out_cleanup;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
+ goto out_cleanup;
+ }
+
+ radeon_fence_unref(&fence);
+
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+ if (r) {
+ DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+ goto out_cleanup;
+ }
+
+ for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ vram_start = vram_map, vram_end = vram_map + size;
+ gtt_start < gtt_end;
+ gtt_start++, vram_start++) {
+ if (*gtt_start != vram_start) {
+ DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
+ "expected 0x%p (VRAM/GTT offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *gtt_start, vram_start,
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)vram_start - vram_map),
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)vram_start - vram_map));
+ radeon_bo_kunmap(gtt_obj[i]);
+ goto out_cleanup;
+ }
+ }
+
+ radeon_bo_kunmap(gtt_obj[i]);
+
+ DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
+ gtt_addr - rdev->mc.gtt_start);
+ }
+
+out_cleanup:
+ if (vram_obj) {
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
+ }
+ if (gtt_obj) {
+ for (i = 0; i < n; i++) {
+ if (gtt_obj[i]) {
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
+ }
+ }
+ kfree(gtt_obj);
+ }
+ if (fence) {
+ radeon_fence_unref(&fence);
+ }
+ if (r) {
+ printk(KERN_WARNING "Error while testing BO move.\n");
+ }
+}
+
+void radeon_test_moves(struct radeon_device *rdev)
+{
+ if (rdev->asic->copy.dma)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+ if (rdev->asic->copy.blit)
+ radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_fence **fence)
+{
+ int r;
+
+ if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed to get dummy create msg\n");
+ return r;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+ if (r) {
+ DRM_ERROR("Failed to get dummy destroy msg\n");
+ return r;
+ }
+ } else {
+ r = radeon_ring_lock(rdev, ring, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+ return r;
+ }
+ radeon_fence_emit(rdev, fence, ring->idx);
+ radeon_ring_unlock_commit(rdev, ring);
+ }
+ return 0;
+}
+
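+/* Two-ring sync test: ring A is made to block on a semaphore twice,
+ * with a fence emitted after each wait. Each fence may only signal
+ * once ring B has signaled the semaphore the corresponding number of
+ * times; a fence completing early means the wait was ignored.
+ */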
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB)
+{
+ struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int r;
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+ if (r)
+ goto out_cleanup;
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+ if (r)
+ goto out_cleanup;
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence1)) {
+ DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence1, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence2)) {
+ DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence2, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ radeon_semaphore_free(rdev, &semaphore, NULL);
+
+ if (fence1)
+ radeon_fence_unref(&fence1);
+
+ if (fence2)
+ radeon_fence_unref(&fence2);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
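+/* Three-ring variant: rings A and B both wait on the same semaphore
+ * while ring C signals it twice. After the first signal exactly one of
+ * fence A/B must complete, and after the second signal the other one;
+ * both or neither completing indicates broken synchronization.
+ */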
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB,
+ struct radeon_ring *ringC)
+{
+ struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ bool sigA, sigB;
+ int i, r;
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+ if (r)
+ goto out_cleanup;
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+ r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+ if (r)
+ goto out_cleanup;
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fenceA)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+ if (radeon_fence_signaled(fenceB)) {
+ DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ for (i = 0; i < 30; ++i) {
+ mdelay(100);
+ sigA = radeon_fence_signaled(fenceA);
+ sigB = radeon_fence_signaled(fenceB);
+ if (sigA || sigB)
+ break;
+ }
+
+ if (!sigA && !sigB) {
+ DRM_ERROR("Neither fence A nor B has been signaled\n");
+ goto out_cleanup;
+ } else if (sigA && sigB) {
+ DRM_ERROR("Both fence A and B has been signaled\n");
+ goto out_cleanup;
+ }
+
+ DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ mdelay(1000);
+
+ r = radeon_fence_wait(fenceA, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence A\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fenceB, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence B\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ radeon_semaphore_free(rdev, &semaphore, NULL);
+
+ if (fenceA)
+ radeon_fence_unref(&fenceA);
+
+ if (fenceB)
+ radeon_fence_unref(&fenceB);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+ int i, j, k;
+
+ for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ringA = &rdev->ring[i];
+ if (!ringA->ready)
+ continue;
+
+ for (j = 0; j < i; ++j) {
+ struct radeon_ring *ringB = &rdev->ring[j];
+ if (!ringB->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+ radeon_test_ring_sync(rdev, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+ radeon_test_ring_sync(rdev, ringB, ringA);
+
+ for (k = 0; k < j; ++k) {
+ struct radeon_ring *ringC = &rdev->ring[k];
+ if (!ringC->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+ radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+ radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+ radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+ radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+ radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+ radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 0000000..eafd816
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+ TP_PROTO(struct radeon_bo *bo),
+ TP_ARGS(bo),
+ TP_STRUCT__entry(
+ __field(struct radeon_bo *, bo)
+ __field(u32, pages)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->pages = bo->tbo.num_pages;
+ ),
+ TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
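+
+/* TRACE_EVENT() above expands to a trace_radeon_bo_create(bo) helper
+ * that the driver calls on object creation; once registered, the event
+ * can be enabled under the "radeon" group in debugfs tracing.
+ */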
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 0000000..e51d357
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 0000000..baf00dd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,899 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ */
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+
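+/* Walk back from the embedded ttm_bo_device to the owning
+ * radeon_device: bdev lives inside radeon_mman, which in turn lives
+ * inside radeon_device, so two container_of() steps suffice.
+ */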
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+ struct radeon_mman *mman;
+ struct radeon_device *rdev;
+
+ mman = container_of(bdev, struct radeon_mman, bdev);
+ rdev = container_of(mman, struct radeon_device, mman);
+ return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ rdev->mman.mem_global_referenced = false;
+ global_ref = &rdev->mman.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &radeon_ttm_mem_global_init;
+ global_ref->release = &radeon_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ rdev->mman.bo_global_ref.mem_glob =
+ rdev->mman.mem_global_ref.object;
+ global_ref = &rdev->mman.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
+ return r;
+ }
+
+ rdev->mman.mem_global_referenced = true;
+ return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+ if (rdev->mman.mem_global_referenced) {
+ drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
+ rdev->mman.mem_global_referenced = false;
+ }
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct radeon_device *rdev;
+
+ rdev = radeon_get_rdev(bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_TT:
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = rdev->mc.gtt_start;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ if (!rdev->ddev->agp->cant_use_aperture)
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ /* "On-card" video ram */
+ man->func = &ttm_bo_manager_func;
+ man->gpu_offset = rdev->mc.vram_start;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
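+/* Choose where an evicted buffer should go. VRAM contents are pushed
+ * to GTT while the GFX ring is up, so the blit engine can move them
+ * back cheaply; otherwise, and for everything else, fall back to
+ * CPU-accessible system memory.
+ */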
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct radeon_bo *rbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+ }
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ else
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ break;
+ case TTM_PL_TT:
+ default:
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ }
+ *placement = rbo->placement;
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ BUG_ON(old_mem->mm_node != NULL);
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+}
+
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct radeon_device *rdev;
+ uint64_t old_start, new_start;
+ struct radeon_fence *fence;
+ int r, ridx;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ ridx = radeon_copy_ring_index(rdev);
+ old_start = old_mem->start << PAGE_SHIFT;
+ new_start = new_mem->start << PAGE_SHIFT;
+
+ switch (old_mem->mem_type) {
+ case TTM_PL_VRAM:
+ old_start += rdev->mc.vram_start;
+ break;
+ case TTM_PL_TT:
+ old_start += rdev->mc.gtt_start;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ return -EINVAL;
+ }
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ new_start += rdev->mc.vram_start;
+ break;
+ case TTM_PL_TT:
+ new_start += rdev->mc.gtt_start;
+ break;
+ default:
+ DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ return -EINVAL;
+ }
+ if (!rdev->ring[ridx].ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+ /* sync other rings */
+ fence = bo->sync_obj;
+ r = radeon_copy(rdev, old_start, new_start,
+ new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+ &fence);
+ /* FIXME: handle copy error */
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+ evict, no_wait_gpu, new_mem);
+ radeon_fence_unref(&fence);
+ return r;
+}
+
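+/* The GPU cannot blit straight into unbound system pages, so a
+ * VRAM -> system move bounces through GTT: reserve a temporary GTT
+ * placement, blit VRAM -> GTT on the GPU, then let TTM finish the
+ * GTT -> system step.
+ */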
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg tmp_mem;
+ u32 placements;
+ struct ttm_placement placement;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
+ if (unlikely(r)) {
+ return r;
+ }
+
+ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+
+ r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+out_cleanup:
+ ttm_bo_mem_put(bo, &tmp_mem);
+ return r;
+}
+
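+/* Mirror of the above for system -> VRAM: bind the pages into a
+ * temporary GTT placement first, then blit GTT -> VRAM on the GPU.
+ */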
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_mem_reg tmp_mem;
+ struct ttm_placement placement;
+ u32 placements;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ interruptible, no_wait_gpu);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+ r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+out_cleanup:
+ ttm_bo_mem_put(bo, &tmp_mem);
+ return r;
+}
+
+static int radeon_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ struct radeon_device *rdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int r;
+
+ rdev = radeon_get_rdev(bo->bdev);
+ if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ radeon_move_null(bo, new_mem);
+ return 0;
+ }
+ if ((old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) ||
+ (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT)) {
+ /* bind is enough */
+ radeon_move_null(bo, new_mem);
+ return 0;
+ }
+ if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
+ rdev->asic->copy.copy == NULL) {
+ /* use memcpy */
+ goto memcpy;
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = radeon_move_vram_ram(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
+ } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) {
+ r = radeon_move_ram_vram(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
+ } else {
+ r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ }
+
+ if (r) {
+memcpy:
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ }
+ return r;
+}
+
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+#ifdef __alpha__
+ /*
+ * Alpha: use bus.addr to hold the ioremap() return,
+ * so we can modify bus.base below.
+ */
+ if (mem->placement & TTM_PL_FLAG_WC)
+ mem->bus.addr =
+ ioremap_wc(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+ else
+ mem->bus.addr =
+ ioremap_nocache(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+
+ /*
+ * Alpha: Use just the bus offset plus
+ * the hose/domain memory base for bus.base.
+ * It then can be used to build PTEs for VRAM
+ * access, as done in ttm_bo_vm_fault().
+ */
+ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+ rdev->ddev->hose->dense_mem_base;
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
+{
+ return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+
+static int radeon_sync_obj_flush(void *sync_obj)
+{
+ return 0;
+}
+
+static void radeon_sync_obj_unref(void **sync_obj)
+{
+ radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
+
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+ return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
+
+static bool radeon_sync_obj_signaled(void *sync_obj)
+{
+ return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+}
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ u64 offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_tt *gtt = (void*)ttm;
+ int r;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ ttm->num_pages, (unsigned)gtt->offset);
+ return r;
+ }
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+ size, page_flags, dummy_read_page);
+ }
+#endif
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->ttm.ttm.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+ int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ if (slave && ttm->sg) {
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ }
+#endif
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+ /* unwind the mappings done so far, including page 0 */
+ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+ return;
+ }
+#endif
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (gtt->ttm.dma_address[i]) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+ .ttm_tt_create = &radeon_ttm_tt_create,
+ .ttm_tt_populate = &radeon_ttm_tt_populate,
+ .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+ .invalidate_caches = &radeon_invalidate_caches,
+ .init_mem_type = &radeon_init_mem_type,
+ .evict_flags = &radeon_evict_flags,
+ .move = &radeon_bo_move,
+ .verify_access = &radeon_verify_access,
+ .sync_obj_signaled = &radeon_sync_obj_signaled,
+ .sync_obj_wait = &radeon_sync_obj_wait,
+ .sync_obj_flush = &radeon_sync_obj_flush,
+ .sync_obj_unref = &radeon_sync_obj_unref,
+ .sync_obj_ref = &radeon_sync_obj_ref,
+ .move_notify = &radeon_bo_move_notify,
+ .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
+};
+
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_ttm_global_init(rdev);
+ if (r) {
+ return r;
+ }
+ /* No other users of the address space, so set it to 0 */
+ r = ttm_bo_device_init(&rdev->mman.bdev,
+ rdev->mman.bo_global_ref.ref.object,
+ &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+ rdev->need_dma32);
+ if (r) {
+ DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ return r;
+ }
+ rdev->mman.initialized = true;
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ rdev->mc.real_vram_size >> PAGE_SHIFT);
+ if (r) {
+ DRM_ERROR("Failed initializing VRAM heap.\n");
+ return r;
+ }
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->stollen_vga_memory);
+ if (r) {
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r)
+ return r;
+ r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ if (r) {
+ radeon_bo_unref(&rdev->stollen_vga_memory);
+ return r;
+ }
+ DRM_INFO("radeon: %uM of VRAM memory ready\n",
+ (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+ rdev->mc.gtt_size >> PAGE_SHIFT);
+ if (r) {
+ DRM_ERROR("Failed initializing GTT heap.\n");
+ return r;
+ }
+ DRM_INFO("radeon: %uM of GTT memory ready.\n",
+ (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+
+ r = radeon_ttm_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+ return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->mman.initialized)
+ return;
+ if (rdev->stollen_vga_memory) {
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r == 0) {
+ radeon_bo_unpin(rdev->stollen_vga_memory);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ }
+ radeon_bo_unref(&rdev->stollen_vga_memory);
+ }
+ ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+ ttm_bo_device_release(&rdev->mman.bdev);
+ radeon_gart_fini(rdev);
+ radeon_ttm_global_fini(rdev);
+ rdev->mman.initialized = false;
+ DRM_INFO("radeon: ttm finalized\n");
+}
+
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+ struct ttm_mem_type_manager *man;
+
+ if (!rdev->mman.initialized)
+ return;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ /* this just adjusts TTM's idea of the VRAM size, which in turn clamps lpfn to the correct value */
+ man->size = size >> PAGE_SHIFT;
+}
+
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
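+/*
+ * mmap fault wrapper: TTM's own vm_ops are copied once on first mmap
+ * and only the fault hook is replaced, so that faults are served while
+ * holding pm.mclk_lock and can't race with memory reclocking.
+ */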
+static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo;
+ struct radeon_device *rdev;
+ int r;
+
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ if (bo == NULL) {
+ return VM_FAULT_NOPAGE;
+ }
+ rdev = radeon_get_rdev(bo->bdev);
+ down_read(&rdev->pm.mclk_lock);
+ r = ttm_vm_ops->fault(vma, vmf);
+ up_read(&rdev->pm.mclk_lock);
+ return r;
+}
+
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct radeon_device *rdev;
+ int r;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = filp->private_data;
+ rdev = file_priv->minor->dev->dev_private;
+ if (rdev == NULL) {
+ return -EINVAL;
+ }
+ r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+ if (unlikely(r != 0)) {
+ return r;
+ }
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ radeon_ttm_vm_ops = *ttm_vm_ops;
+ radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+ }
+ vma->vm_ops = &radeon_ttm_vm_ops;
+ return 0;
+}
+
+
+#define RADEON_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_mm_dump_table(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ret;
+ struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+ spin_lock(&glob->lru_lock);
+ ret = drm_mm_dump_table(m, mm);
+ spin_unlock(&glob->lru_lock);
+ return ret;
+}
+#endif
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+ unsigned i;
+
+ for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+ if (i == 0)
+ sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+ else
+ sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+ radeon_mem_types_list[i].driver_features = 0;
+ if (i == 0)
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ else
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+
+ }
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+ }
+#endif
+#endif
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 0000000..f3ccf6d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,869 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS 1000
+
+/* Firmware Names */
+#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ unsigned long bo_size;
+ const char *fw_name;
+ int i, r;
+
+ INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+ pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
+ r = IS_ERR(pdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_RV710:
+ case CHIP_RV730:
+ case CHIP_RV740:
+ fw_name = FIRMWARE_RV710;
+ break;
+
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ fw_name = FIRMWARE_CYPRESS;
+ break;
+
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ case CHIP_PALM:
+ case CHIP_CAYMAN:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ fw_name = FIRMWARE_SUMO;
+ break;
+
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ case CHIP_PITCAIRN:
+ case CHIP_ARUBA:
+ fw_name = FIRMWARE_TAHITI;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+ fw_name);
+ platform_device_unregister(pdev);
+ return r;
+ }
+
+ platform_device_unregister(pdev);
+
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+ RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+ r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->uvd.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+ return r;
+ }
+
+ r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+ return r;
+ }
+
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ atomic_set(&rdev->uvd.handles[i], 0);
+ rdev->uvd.filp[i] = NULL;
+ }
+
+ return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return;
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (!r) {
+ radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+ radeon_bo_unpin(rdev->uvd.vcpu_bo);
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ }
+
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+ unsigned size;
+ void *ptr;
+ int i;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
+ rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+ if (!rdev->uvd.saved_bo)
+ return -ENOMEM;
+ memcpy(rdev->uvd.saved_bo, ptr, size);
+
+ return 0;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+ unsigned size;
+ void *ptr;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return -EINVAL;
+
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
+ if (rdev->uvd.saved_bo != NULL) {
+ memcpy(ptr, rdev->uvd.saved_bo, size);
+ kfree(rdev->uvd.saved_bo);
+ rdev->uvd.saved_bo = NULL;
+ } else
+ memset(ptr, 0, size);
+
+ return 0;
+}
+
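+/*
+ * UVD can only address buffers inside a single 256MB segment, so limit
+ * the allowed placement window to the first 256MB of VRAM; the CS
+ * checker below rejects relocations that cross a 256MB boundary for the
+ * same reason.
+ */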
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+{
+ rbo->placement.fpfn = 0 >> PAGE_SHIFT;
+ rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+ int i, r;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
+ struct radeon_fence *fence;
+
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
+
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+
+ rdev->uvd.filp[i] = NULL;
+ atomic_set(&rdev->uvd.handles[i], 0);
+ }
+ }
+}
+
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+ unsigned stream_type = msg[4];
+ unsigned width = msg[6];
+ unsigned height = msg[7];
+ unsigned dpb_size = msg[9];
+ unsigned pitch = msg[28];
+
+ unsigned width_in_mb = width / 16;
+ unsigned height_in_mb = ALIGN(height / 16, 2);
+
+ unsigned image_size, tmp, min_dpb_size;
+
+ image_size = width * height;
+ image_size += image_size / 2;
+ image_size = ALIGN(image_size, 1024);
+
+ switch (stream_type) {
+ case 0: /* H264 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 17;
+
+ /* macroblock context buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 32;
+ break;
+
+ case 1: /* VC1 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CONTEXT_BUFFER */
+ min_dpb_size += width_in_mb * height_in_mb * 128;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * 64;
+
+ /* DB surface buffer */
+ min_dpb_size += width_in_mb * 128;
+
+ /* BP */
+ tmp = max(width_in_mb, height_in_mb);
+ min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+ break;
+
+ case 3: /* MPEG2 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+ break;
+
+ case 4: /* MPEG4 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CM */
+ min_dpb_size += width_in_mb * height_in_mb * 64;
+
+ /* IT surface buffer */
+ min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+ break;
+
+ default:
+ DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+ return -EINVAL;
+ }
+
+ if (width > pitch) {
+ DRM_ERROR("Invalid UVD decoding target pitch!\n");
+ return -EINVAL;
+ }
+
+ if (dpb_size < min_dpb_size) {
+ DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+ dpb_size, min_dpb_size);
+ return -EINVAL;
+ }
+
+ buf_sizes[0x1] = dpb_size;
+ buf_sizes[0x2] = image_size;
+ return 0;
+}
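+
+/*
+ * Worked example (illustrative numbers only, not part of the driver):
+ * for a 1920x1088 H264 stream, width_in_mb = 120 and height_in_mb = 68:
+ *
+ *   image_size   = ALIGN(1920 * 1088 * 3 / 2, 1024) =  3133440
+ *   min_dpb_size = 3133440 * 17                     = 53268480
+ *                + 120 * 68 * 17 * 192              = 26634240
+ *                + 120 * 68 * 32                    =   261120
+ *                                            total  = 80163840
+ *
+ * so the dpb_size in the decode message must be at least ~76.5MB.
+ */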
+
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ unsigned offset, unsigned buf_sizes[])
+{
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+
+ int i, r;
+
+ if (offset & 0x3F) {
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_kmap(bo, &ptr);
+ if (r)
+ return r;
+
+ msg = ptr + offset;
+
+ msg_type = msg[1];
+ handle = msg[2];
+
+ if (handle == 0) {
+ radeon_bo_kunmap(bo);
+ DRM_ERROR("Invalid UVD handle!\n");
+ return -EINVAL;
+ }
+
+ if (msg_type == 1) {
+ /* it's a decode msg, calc buffer sizes */
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+ } else if (msg_type == 2) {
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+ radeon_bo_kunmap(bo);
+ return 0;
+ } else {
+ /* it's a create msg, no special handling needed */
+ radeon_bo_kunmap(bo);
+ }
+
+ /* create or decode, validate the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+ return 0;
+ }
+
+ /* handle not found, try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+ p->rdev->uvd.filp[i] = p->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+ int data0, int data1,
+ unsigned buf_sizes[])
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_reloc *reloc;
+ unsigned idx, cmd, offset;
+ uint64_t start, end;
+ int r;
+
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ offset = radeon_get_ib_value(p, data0);
+ idx = radeon_get_ib_value(p, data1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->lobj.gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
+
+ p->ib.ptr[data0] = start & 0xFFFFFFFF;
+ p->ib.ptr[data1] = start >> 32;
+
+ cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+ if (cmd < 0x4) {
+ if ((end - start) < buf_sizes[cmd]) {
+ DRM_ERROR("buffer to small (%d / %d)!\n",
+ (unsigned)(end - start), buf_sizes[cmd]);
+ return -EINVAL;
+ }
+
+ } else if (cmd != 0x100) {
+ DRM_ERROR("invalid UVD command %X!\n", cmd);
+ return -EINVAL;
+ }
+
+ if ((start >> 28) != (end >> 28)) {
+ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ /* TODO: is this still necessary on NI+ ? */
+ if ((cmd == 0 || cmd == 0x3) &&
+ (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ if (cmd == 0) {
+ r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int *data0, int *data1,
+ unsigned buf_sizes[])
+{
+ int i, r;
+
+ p->idx++;
+ for (i = 0; i <= pkt->count; ++i) {
+ switch (pkt->reg + i*4) {
+ case UVD_GPCOM_VCPU_DATA0:
+ *data0 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_DATA1:
+ *data1 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_CMD:
+ r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case UVD_ENGINE_CNTL:
+ break;
+ default:
+ DRM_ERROR("Invalid reg 0x%X!\n",
+ pkt->reg + i*4);
+ return -EINVAL;
+ }
+ p->idx++;
+ }
+ return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ int r, data0 = 0, data1 = 0;
+
+ /* minimum buffer sizes */
+ unsigned buf_sizes[] = {
+ [0x00000000] = 2048,
+ [0x00000001] = 32 * 1024 * 1024,
+ [0x00000002] = 2048 * 1152 * 3,
+ [0x00000003] = 2048,
+ };
+
+ if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+
+
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r)
+ return r;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = radeon_uvd_cs_reg(p, &pkt, &data0,
+ &data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case RADEON_PACKET_TYPE2:
+ p->idx += pkt.count + 2;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+ int ring, struct radeon_bo *bo,
+ struct radeon_fence **fence)
+{
+ struct ttm_validate_buffer tv;
+ struct list_head head;
+ struct radeon_ib ib;
+ uint64_t addr;
+ int i, r;
+
+ memset(&tv, 0, sizeof(tv));
+ tv.bo = &bo->tbo;
+
+ INIT_LIST_HEAD(&head);
+ list_add(&tv.head, &head);
+
+ r = ttm_eu_reserve_buffers(&head);
+ if (r)
+ return r;
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
+ radeon_uvd_force_into_uvd_segment(bo);
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ addr = radeon_bo_gpu_offset(bo);
+ ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+ ib.ptr[1] = addr;
+ ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+ ib.ptr[3] = addr >> 32;
+ ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+ ib.ptr[5] = 0;
+ for (i = 6; i < 16; ++i)
+ ib.ptr[i] = PACKET2(0);
+ ib.length_dw = 16;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+ ttm_eu_fence_buffer_objects(&head, ib.fence);
+
+ if (fence)
+ *fence = radeon_fence_ref(ib.fence);
+
+ radeon_ib_free(rdev, &ib);
+ radeon_bo_unref(&bo);
+ return 0;
+}
+
+/* multiple fence commands without any stream commands in between can
+   crash the vcpu so just try to emit a dummy create/destroy msg to
+   avoid this */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ /* stitch together a UVD create msg */
+ msg[0] = cpu_to_le32(0x00000de4);
+ msg[1] = cpu_to_le32(0x00000000);
+ msg[2] = cpu_to_le32(handle);
+ msg[3] = cpu_to_le32(0x00000000);
+ msg[4] = cpu_to_le32(0x00000000);
+ msg[5] = cpu_to_le32(0x00000000);
+ msg[6] = cpu_to_le32(0x00000000);
+ msg[7] = cpu_to_le32(0x00000780);
+ msg[8] = cpu_to_le32(0x00000440);
+ msg[9] = cpu_to_le32(0x00000000);
+ msg[10] = cpu_to_le32(0x01b37000);
+ for (i = 11; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ /* stitch together a UVD destroy msg */
+ msg[0] = cpu_to_le32(0x00000de4);
+ msg[1] = cpu_to_le32(0x00000002);
+ msg[2] = cpu_to_le32(handle);
+ msg[3] = cpu_to_le32(0x00000000);
+ for (i = 4; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev =
+ container_of(work, struct radeon_device, uvd.idle_work.work);
+
+ if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ else
+ schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
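+/*
+ * Called on every UVD submission: cancelling the pending idle work
+ * tells us whether the block was considered busy. Only when the work
+ * was not pending (so the idle handler may already have dropped the
+ * clocks) are vclk/dclk raised again (53300/40000, i.e. 533/400 MHz in
+ * the driver's 10 kHz units).
+ */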
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+ bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+ set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ if (set_clocks)
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
+
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+ unsigned target_freq,
+ unsigned pd_min,
+ unsigned pd_even)
+{
+ unsigned post_div = vco_freq / target_freq;
+
+ /* adjust to post divider minimum value */
+ if (post_div < pd_min)
+ post_div = pd_min;
+
+ /* we always need a frequency less than or equal to the target */
+ if ((vco_freq / post_div) > target_freq)
+ post_div += 1;
+
+ /* post dividers above a certain value must be even */
+ if (post_div > pd_even && post_div % 2)
+ post_div += 1;
+
+ return post_div;
+}
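+
+/*
+ * Example (illustrative numbers): vco_freq = 160000 and target_freq =
+ * 53300 (10 kHz units) with pd_min = 2 and pd_even = 4 gives
+ * 160000 / 53300 = 3; since 160000 / 3 is still above the target the
+ * divider is bumped to 4, which is already even, so post_div = 4
+ * (400 MHz).
+ */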
+
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div)
+{
+ unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+ /* start off with something large */
+ unsigned optimal_score = ~0;
+
+ /* loop through vco from low to high */
+ vco_min = max(max(vco_min, vclk), dclk);
+ for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+ uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+ unsigned vclk_div, dclk_div, score;
+
+ do_div(fb_div, ref_freq);
+
+ /* fb div out of range ? */
+ if (fb_div > fb_mask)
+ break; /* it can only get worse */
+
+ fb_div &= fb_mask;
+
+ /* calc vclk divider with current vco freq */
+ vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+ pd_min, pd_even);
+ if (vclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc dclk divider with current vco freq */
+ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+ pd_min, pd_even);
+ if (dclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc score with current vco freq */
+ score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+ /* determine if this vco setting is better than current optimal settings */
+ if (score < optimal_score) {
+ *optimal_fb_div = fb_div;
+ *optimal_vclk_div = vclk_div;
+ *optimal_dclk_div = dclk_div;
+ optimal_score = score;
+ if (optimal_score == 0)
+ break; /* it can't get better than this */
+ }
+ }
+
+ /* did we find a valid setup? */
+ if (optimal_score == ~0)
+ return -EINVAL;
+
+ return 0;
+}
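+
+/*
+ * Typical use, sketched with made-up limits (the real VCO/fb/pd
+ * constraints are chip specific and come from the per-ASIC clock code):
+ *
+ *   unsigned fb_div, vclk_div, dclk_div;
+ *   r = radeon_uvd_calc_upll_dividers(rdev, 53300, 40000,
+ *                                     125000, 250000,  -- vco min/max
+ *                                     16, 0x03FFFFFF,  -- fb factor/mask
+ *                                     2, 32, 4,        -- pd min/max/even
+ *                                     &fb_div, &vclk_div, &dclk_div);
+ */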
+
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl)
+{
+ unsigned i;
+
+ /* make sure UPLL_CTLREQ is deasserted */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ mdelay(10);
+
+ /* assert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+ /* wait for CTLACK and CTLACK2 to get asserted */
+ for (i = 0; i < 100; ++i) {
+ uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+ if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+ break;
+ mdelay(10);
+ }
+
+ /* deassert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ if (i == 100) {
+ DRM_ERROR("Timeout setting UVD clocks!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 0000000..a072fa8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,642 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 0000000..b912a37
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,644 @@
+evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GROUP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
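The reg_srcs files above share a simple two-column format: a header line
giving the chip family and a hex size value, then one dword-aligned
register offset and mnemonic per line. mkregtable.c compiles these lists
into the generated safe-register tables consulted by the command-stream
checker. As a rough standalone illustration only (not the in-tree
generator, and the header value is deliberately treated as opaque here,
since only mkregtable.c defines its exact meaning), a minimal reader
could look like:

/* Minimal reg_srcs reader -- an illustrative sketch, not mkregtable.c.
 * Assumed format: "<family> <hex size>" header line, then one
 * "<hex offset> <NAME>" entry per line. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char family[64], name[128], line[256];
	unsigned int size = 0, offset, count = 0, max = 0;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <reg_srcs file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	/* Header: chip family plus a hex value sized by the generator. */
	if (!fgets(line, sizeof(line), f) ||
	    sscanf(line, "%63s %x", family, &size) != 2) {
		fprintf(stderr, "bad header line\n");
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%x %127s", &offset, name) != 2)
			continue; /* tolerate blank lines */
		if (offset & 3) /* registers are 32-bit dwords */
			fprintf(stderr, "unaligned entry: %s", line);
		if (offset > max)
			max = offset;
		count++;
	}
	fclose(f);
	printf("%s: %u entries, max offset 0x%08X (header 0x%X)\n",
	       family, count, max, size);
	return 0;
}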
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r100 b/drivers/gpu/drm/radeon/reg_srcs/r100
new file mode 100644
index 0000000..f7ee062
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r100
@@ -0,0 +1,105 @@
+r100 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1810 FOG_3D_TABLE_START
+0x1814 FOG_3D_TABLE_END
+0x1a14 FOG_TABLE_INDEX
+0x1a18 FOG_TABLE_DATA
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 SE_COORD_FMT
+0x1c60 PP_TXCBLEND_0
+0x1c64 PP_TXABLEND_0
+0x1c68 PP_TFACTOR_0
+0x1c78 PP_TXCBLEND_1
+0x1c7c PP_TXABLEND_1
+0x1c80 PP_TFACTOR_1
+0x1c90 PP_TXCBLEND_2
+0x1c94 PP_TXABLEND_2
+0x1c98 PP_TFACTOR_2
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1d40 PP_BORDER_COLOR0
+0x1d44 PP_BORDER_COLOR1
+0x1d48 PP_BORDER_COLOR2
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2140 SE_CNTL_STATUS
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
+0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
+0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
+0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
+0x2220 SE_TCL_MATERIAL_AMBIENT_RED
+0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
+0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
+0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
+0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
+0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
+0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
+0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
+0x2240 SE_TCL_MATERIAL_SPECULAR_RED
+0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
+0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
+0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
+0x2250 SE_TCL_SHININESS
+0x2254 SE_TCL_OUTPUT_VTX_FMT
+0x2258 SE_TCL_OUTPUT_VTX_SEL
+0x225c SE_TCL_MATRIX_SELECT_0
+0x2260 SE_TCL_MATRIX_SELECT_1
+0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
+0x2268 SE_TCL_TEXTURE_PROC_CTL
+0x226c SE_TCL_LIGHT_MODEL_CTL
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 SE_TCL_STATE_FLUSH
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x3290 RB3D_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
new file mode 100644
index 0000000..c29ac43
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -0,0 +1,186 @@
+r200 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 RE_CNTL
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1cd8 RE_SCISSOR_TL_0
+0x1cdc RE_SCISSOR_BR_0
+0x1ce0 RE_SCISSOR_TL_1
+0x1ce4 RE_SCISSOR_BR_1
+0x1ce8 RE_SCISSOR_TL_2
+0x1cec RE_SCISSOR_BR_2
+0x1d60 RB3D_DEPTHXY_OFFSET
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2080 SE_VAP_CNTL
+0x2090 SE_TCL_OUTPUT_VTX_FMT_0
+0x2094 SE_TCL_OUTPUT_VTX_FMT_1
+0x20b0 SE_VTE_CNTL
+0x2140 SE_CNTL_STATUS
+0x2180 SE_VTX_STATE_CNTL
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2230 SE_TCL_MATRIX_SEL_0
+0x2234 SE_TCL_MATRIX_SEL_1
+0x2238 SE_TCL_MATRIX_SEL_2
+0x223c SE_TCL_MATRIX_SEL_3
+0x2240 SE_TCL_MATRIX_SEL_4
+0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
+0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
+0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
+0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
+0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
+0x2268 SE_TCL_LIGHT_MODEL_CTL_0
+0x226c SE_TCL_LIGHT_MODEL_CTL_1
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x22a8 SE_TCL_TEX_PROC_CTL_2
+0x22ac SE_TCL_TEX_PROC_CTL_3
+0x22b0 SE_TCL_TEX_PROC_CTL_0
+0x22b4 SE_TCL_TEX_PROC_CTL_1
+0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
+0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
+0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
+0x2648 RE_POINTSIZE
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x26f0 RE_AUX_SCISSOR_CNTL
+0x2c14 PP_BORDER_COLOR_0
+0x2c34 PP_BORDER_COLOR_1
+0x2c54 PP_BORDER_COLOR_2
+0x2c74 PP_BORDER_COLOR_3
+0x2c94 PP_BORDER_COLOR_4
+0x2cb4 PP_BORDER_COLOR_5
+0x2cc4 PP_CNTL_X
+0x2cf8 PP_TRI_PERF
+0x2cfc PP_PERF_CNTL
+0x2d9c PP_TAM_DEBUG3
+0x2ee0 PP_TFACTOR_0
+0x2ee4 PP_TFACTOR_1
+0x2ee8 PP_TFACTOR_2
+0x2eec PP_TFACTOR_3
+0x2ef0 PP_TFACTOR_4
+0x2ef4 PP_TFACTOR_5
+0x2ef8 PP_TFACTOR_6
+0x2efc PP_TFACTOR_7
+0x2f00 PP_TXCBLEND_0
+0x2f04 PP_TXCBLEND2_0
+0x2f08 PP_TXABLEND_0
+0x2f0c PP_TXABLEND2_0
+0x2f10 PP_TXCBLEND_1
+0x2f14 PP_TXCBLEND2_1
+0x2f18 PP_TXABLEND_1
+0x2f1c PP_TXABLEND2_1
+0x2f20 PP_TXCBLEND_2
+0x2f24 PP_TXCBLEND2_2
+0x2f28 PP_TXABLEND_2
+0x2f2c PP_TXABLEND2_2
+0x2f30 PP_TXCBLEND_3
+0x2f34 PP_TXCBLEND2_3
+0x2f38 PP_TXABLEND_3
+0x2f3c PP_TXABLEND2_3
+0x2f40 PP_TXCBLEND_4
+0x2f44 PP_TXCBLEND2_4
+0x2f48 PP_TXABLEND_4
+0x2f4c PP_TXABLEND2_4
+0x2f50 PP_TXCBLEND_5
+0x2f54 PP_TXCBLEND2_5
+0x2f58 PP_TXABLEND_5
+0x2f5c PP_TXABLEND2_5
+0x2f60 PP_TXCBLEND_6
+0x2f64 PP_TXCBLEND2_6
+0x2f68 PP_TXABLEND_6
+0x2f6c PP_TXABLEND2_6
+0x2f70 PP_TXCBLEND_7
+0x2f74 PP_TXCBLEND2_7
+0x2f78 PP_TXABLEND_7
+0x2f7c PP_TXABLEND2_7
+0x2f80 PP_TXCBLEND_8
+0x2f84 PP_TXCBLEND2_8
+0x2f88 PP_TXABLEND_8
+0x2f8c PP_TXABLEND2_8
+0x2f90 PP_TXCBLEND_9
+0x2f94 PP_TXCBLEND2_9
+0x2f98 PP_TXABLEND_9
+0x2f9c PP_TXABLEND2_9
+0x2fa0 PP_TXCBLEND_10
+0x2fa4 PP_TXCBLEND2_10
+0x2fa8 PP_TXABLEND_10
+0x2fac PP_TXABLEND2_10
+0x2fb0 PP_TXCBLEND_11
+0x2fb4 PP_TXCBLEND2_11
+0x2fb8 PP_TXABLEND_11
+0x2fbc PP_TXABLEND2_11
+0x2fc0 PP_TXCBLEND_12
+0x2fc4 PP_TXCBLEND2_12
+0x2fc8 PP_TXABLEND_12
+0x2fcc PP_TXABLEND2_12
+0x2fd0 PP_TXCBLEND_13
+0x2fd4 PP_TXCBLEND2_13
+0x2fd8 PP_TXABLEND_13
+0x2fdc PP_TXABLEND2_13
+0x2fe0 PP_TXCBLEND_14
+0x2fe4 PP_TXCBLEND2_14
+0x2fe8 PP_TXABLEND_14
+0x2fec PP_TXABLEND2_14
+0x2ff0 PP_TXCBLEND_15
+0x2ff4 PP_TXCBLEND2_15
+0x2ff8 PP_TXABLEND_15
+0x2ffc PP_TXABLEND2_15
+0x3218 RB3D_BLENDCOLOR
+0x321c RB3D_ABLENDCNTL
+0x3220 RB3D_CBLENDCNTL
+0x3290 RB3D_ZPASS_DATA
+
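Each of these lists is compiled into a per-family bitmap, apparently with
one bit per 32-bit register, so the command-stream checker can decide in
constant time whether a packet may touch a given offset. The helper below
is a hedged sketch of that membership test under that one-bit-per-dword
assumption -- generic illustrative code with made-up names, not the
driver's actual lookup:

#include <stdint.h>

/* Bit (offset >> 2) of the bitmap is set when user command streams may
 * write that register. Illustrative only; the real checks live in the
 * per-family CS parsers. */
static int reg_is_safe(const uint32_t *safe_bm, unsigned int offset)
{
	unsigned int i = offset >> 2; /* dword index into the bitmap */

	return (safe_bm[i >> 5] >> (i & 31)) & 1;
}

A register offset that fails such a test would be rejected before the
indirect buffer is ever handed to the ring.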
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
new file mode 100644
index 0000000..e8a1786
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -0,0 +1,714 @@
+r300 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 0000000..722074e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,780 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
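
The reg_srcs files added in this patch all share one plain-text format: a header line naming the ASIC and giving the byte size of the register range the table covers, followed by one "<hex offset> <register name>" pair per line for every register that userspace may program directly from a command stream. mkregtable.c consumes these files at build time and emits a bitmap header for the command-stream checker. As a rough sketch only (the generator's real output format and bit polarity are defined by mkregtable.c itself), a minimal parser for the format could look like this; the file path and variable names are illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char asic[64], name[128];
		unsigned int max_offset, offset, nlisted = 0;
		FILE *f = fopen("reg_srcs/r600", "r");	/* illustrative path */

		if (!f)
			return 1;
		/* header line: ASIC name and byte size of the covered range */
		if (fscanf(f, "%63s %x", asic, &max_offset) != 2)
			return 1;
		/* one bit per dword register */
		unsigned int nregs = max_offset / 4;
		unsigned int *bm = calloc((nregs + 31) / 32, sizeof(*bm));

		if (!bm)
			return 1;
		while (fscanf(f, "%x %127s", &offset, name) == 2 &&
		       offset < max_offset) {
			unsigned int bit = offset / 4;

			bm[bit / 32] |= 1u << (bit % 32);	/* mark as listed */
			nlisted++;
		}
		printf("%s: %u of %u registers listed\n", asic, nlisted, nregs);
		free(bm);
		fclose(f);
		return 0;
	}
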
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 0000000..20bfbda
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,755 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008490 CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028354 SX_SURFACE_SYNC
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009604 TC_INVALIDATE
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009700 VC_CNTL
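
With the r600 list complete, it is worth noting how such a table is consulted at run time. In the radeon CS checkers the byte offset of a register write is split into a bitmap-word index and a bit mask: each 32-bit word of the generated table covers 32 dword registers, i.e. 128 bytes of register space. The helper below sketches that lookup, modeled on the per-ASIC *_cs.c checks; the table name and the assumption that a set bit means "listed" are illustrative, since the real bit polarity is whatever mkregtable.c emits:

	#include <stdbool.h>

	/* Sketch of the bitmap lookup used to validate a register write.
	 * 'reg' is a byte offset; each bitmap word covers 128 bytes.
	 */
	static bool reg_is_listed(const unsigned int *safe_bm,
				  unsigned int bm_words, unsigned int reg)
	{
		unsigned int word = reg >> 7;		/* 128 bytes per word */
		unsigned int mask = 1u << ((reg >> 2) & 31);

		if (word >= bm_words)
			return false;		/* beyond the covered range */
		return (safe_bm[word] & mask) != 0;
	}

For example, a write to WAIT_UNTIL at 0x8040 (listed above) lands in word 0x8040 >> 7 = 0x100 with mask bit (0x8040 >> 2) & 31 = 16.
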
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rn50 b/drivers/gpu/drm/radeon/reg_srcs/rn50
new file mode 100644
index 0000000..2687b63
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rn50
@@ -0,0 +1,30 @@
+rn50 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
new file mode 100644
index 0000000..d9f6286
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -0,0 +1,780 @@
+rs600 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
new file mode 100644
index 0000000..78d5e99
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -0,0 +1,496 @@
+rv515 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x2218 VAP_TEX_TO_COLOR_CNTL
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
+0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
+0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
+0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
+0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
+0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
+0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
+0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
+0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
+0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
+0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
+0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
+0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
+0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
+0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
+0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
+0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
+0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
+0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
+0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
+0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
+0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
+0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
+0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
+0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
+0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
+0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
+0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
+0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
+0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
+0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
+0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4074 RS_IP_0
+0x4078 RS_IP_1
+0x407C RS_IP_2
+0x4080 RS_IP_3
+0x4084 RS_IP_4
+0x4088 RS_IP_5
+0x408C RS_IP_6
+0x4090 RS_IP_7
+0x4094 RS_IP_8
+0x4098 RS_IP_9
+0x409C RS_IP_10
+0x40A0 RS_IP_11
+0x40A4 RS_IP_12
+0x40A8 RS_IP_13
+0x40AC RS_IP_14
+0x40B0 RS_IP_15
+0x4320 RS_INST_0
+0x4324 RS_INST_1
+0x4328 RS_INST_2
+0x432C RS_INST_3
+0x4330 RS_INST_4
+0x4334 RS_INST_5
+0x4338 RS_INST_6
+0x433C RS_INST_7
+0x4340 RS_INST_8
+0x4344 RS_INST_9
+0x4348 RS_INST_10
+0x434C RS_INST_11
+0x4350 RS_INST_12
+0x4354 RS_INST_13
+0x4358 RS_INST_14
+0x435C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4250 GA_US_VECTOR_INDEX
+0x4254 GA_US_VECTOR_DATA
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4620 US_FC_BOOL_CONST
+0x4624 US_FC_CTRL
+0x4630 US_CODE_ADDR
+0x4634 US_CODE_RANGE
+0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4EF8 RB3D_CONSTANT_COLOR_AR
+0x4EFC RB3D_CONSTANT_COLOR_GB
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 0000000..48a913a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+/* Registers */
+#define R_00015C_NB_TOM 0x00015C
+#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00015C_MC_FB_START 0xFFFF0000
+#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00015C_MC_FB_TOP 0x0000FFFF
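+
+/* The S_/G_/C_ helpers above follow the driver's bitfield convention:
+ * S_ shifts a value into the field, G_ extracts it back out, and C_ is
+ * the AND-mask that clears the field. An illustrative read-modify-write
+ * of NB_TOM (a sketch only; new_top is a hypothetical variable):
+ *
+ * u32 tom = RREG32(R_00015C_NB_TOM);
+ * tom &= C_00015C_MC_FB_TOP;
+ * tom |= S_00015C_MC_FB_TOP(new_top >> 16);
+ * WREG32(R_00015C_NB_TOM, tom);
+ */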
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 0000000..b8074a8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rs400d.h"
+
+/* This file gathers functions specific to the rs400 and rs480 IGPs. */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+ /* Check gart size */
+ switch (rdev->mc.gtt_size/(1024*1024)) {
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ DRM_ERROR("Unable to use IGP GART size %uM\n",
+ (unsigned)(rdev->mc.gtt_size >> 20));
+ DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+ DRM_ERROR("Forcing to 32M GART size\n");
+ rdev->mc.gtt_size = 32 * 1024 * 1024;
+ return;
+ }
+}
+
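+/* Flush the IGP GART TLB: request an invalidate, poll (bounded by
+ * usec_timeout) until the hardware clears the bit, then write the
+ * control register back to zero. */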
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ unsigned int timeout = rdev->usec_timeout;
+
+ WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+ do {
+ tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+ if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+ break;
+ DRM_UDELAY(1);
+ timeout--;
+ } while (timeout > 0);
+ WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.ptr) {
+ WARN(1, "RS400 GART already initialized\n");
+ return 0;
+ }
+ /* Check gart size */
+ switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r)
+ return r;
+ if (rs400_debugfs_pcie_gart_info_init(rdev))
+ DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+ return radeon_gart_table_ram_alloc(rdev);
+}
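+
+/* Note: the RS400/RS480 page table is allocated in system RAM via
+ * radeon_gart_table_ram_alloc() above rather than in VRAM as on the
+ * PCIE GARTs of later chips; on these IGPs the framebuffer itself is
+ * carved out of system memory, so a RAM-backed table is the natural
+ * fit. */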
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+ uint32_t size_reg;
+ uint32_t tmp;
+
+ radeon_gart_restore(rdev);
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+ /* Check gart size */
+ switch(rdev->mc.gtt_size / (1024 * 1024)) {
+ case 32:
+ size_reg = RS480_VA_SIZE_32MB;
+ break;
+ case 64:
+ size_reg = RS480_VA_SIZE_64MB;
+ break;
+ case 128:
+ size_reg = RS480_VA_SIZE_128MB;
+ break;
+ case 256:
+ size_reg = RS480_VA_SIZE_256MB;
+ break;
+ case 512:
+ size_reg = RS480_VA_SIZE_512MB;
+ break;
+ case 1024:
+ size_reg = RS480_VA_SIZE_1GB;
+ break;
+ case 2048:
+ size_reg = RS480_VA_SIZE_2GB;
+ break;
+ default:
+ return -EINVAL;
+ }
+	/* It should be fine to program it to the max value */
+ if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+ WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+ WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+ } else {
+ WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+ WREG32(RS480_AGP_BASE_2, 0);
+ }
+ tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+ tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+ WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ } else {
+ WREG32(RADEON_MC_AGP_LOCATION, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ }
+	/* The GART base register takes the page-aligned low 32 bits of the
+	 * table address; address bits 39:32 are packed into bits 11:4. */
+ tmp = (u32)rdev->gart.table_addr & 0xfffff000;
+ tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
+
+ WREG32_MC(RS480_GART_BASE, tmp);
+ /* TODO: more tweaking here */
+ WREG32_MC(RS480_GART_FEATURE_ID,
+ (RS480_TLB_ENABLE |
+ RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+ /* Disable snooping */
+ WREG32_MC(RS480_AGP_MODE_CNTL,
+ (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+ /* Disable AGP mode */
+ /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+ * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ } else {
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ }
+ /* Enable gart */
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+ rs400_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+ WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+void rs400_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ rs400_gart_disable(rdev);
+ radeon_gart_table_ram_free(rdev);
+}
+
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE (1 << 3)
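+
+/* An RS400 GART entry is a single little-endian 32-bit word: the
+ * page-aligned low address bits sit in their natural position, address
+ * bits 39:32 are packed into bits 11:4, and the flag bits above mark
+ * the page writeable/readable. As a worked example, the 40-bit bus
+ * address 0x1_2345_6000 would encode as
+ * (0x23456000 & PAGE_MASK) | (0x1 << 4) | RS400_PTE_WRITEABLE |
+ * RS400_PTE_READABLE. */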
+
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ uint32_t entry;
+ u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+
+ entry = (lower_32_bits(addr) & PAGE_MASK) |
+ ((upper_32_bits(addr) & 0xff) << 4) |
+ RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ entry = cpu_to_le32(entry);
+ gtt[i] = entry;
+ return 0;
+}
+
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+static void rs400_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: is this correct ? */
+ r420_pipes_init(rdev);
+ if (rs400_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+ "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
+ }
+}
+
+static void rs400_mc_init(struct radeon_device *rdev)
+{
+ u64 base;
+
+ rs400_gart_adjust_size(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
+	/* DDR for all cards after R300, including IGPs */
+ rdev->mc.vram_is_ddr = true;
+ rdev->mc.vram_width = 128;
+ r100_vram_init_sizes(rdev);
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
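+/* MC registers on these IGPs sit behind an index/data pair: the
+ * register number goes into RS480_NB_MC_INDEX (with
+ * RS480_NB_MC_IND_WR_EN set for writes), the payload moves through
+ * RS480_NB_MC_DATA, and the index is parked at 0xff afterwards. The
+ * two accessors below wrap that sequence. */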
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+ r = RREG32(RS480_NB_MC_DATA);
+ WREG32(RS480_NB_MC_INDEX, 0xff);
+ return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+ WREG32(RS480_NB_MC_DATA, (v));
+ WREG32(RS480_NB_MC_INDEX, 0xff);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+ seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+ if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+ tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+ seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+ seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+ seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
+ seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+ tmp = RREG32(RS690_HDP_FB_LOCATION);
+ seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+ } else {
+ tmp = RREG32(RADEON_AGP_BASE);
+ seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+ tmp = RREG32(RS480_AGP_BASE_2);
+ seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+ tmp = RREG32(RADEON_MC_AGP_LOCATION);
+ seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+ }
+ tmp = RREG32_MC(RS480_GART_BASE);
+ seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+ seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+ seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x5F);
+ seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+ seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+ tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+ seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x3B);
+ seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x3C);
+ seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x30);
+ seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x31);
+ seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x32);
+ seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x33);
+ seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x34);
+ seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x35);
+ seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x36);
+ seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+ tmp = RREG32_MC(0x37);
+ seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+ {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+static void rs400_mc_program(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+
+ /* Stops all mc clients */
+ r100_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (rs400_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
+ WREG32(R_000148_MC_FB_LOCATION,
+ S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+ r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ r100_set_common_regs(rdev);
+
+ rs400_mc_program(rdev);
+ /* Resume clock */
+ r300_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs400_gpu_init(rdev);
+ r100_enable_bm(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs400_gart_enable(rdev);
+ if (r)
+ return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+ int r;
+
+	/* Make sure the GART is not active */
+ rs400_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r300_clock_startup(rdev);
+ /* setup MC before calling post tables */
+ rs400_mc_program(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = rs400_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ rs400_gart_disable(rdev);
+ return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+	/* TODO: disabling VGA properly requires using the VGA bus request */
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+	/* check whether the card is posted */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize memory controller */
+ rs400_mc_init(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ r = rs400_gart_init(rdev);
+ if (r)
+ return r;
+ r300_set_reg_safe(rdev);
+
+ rdev->accel_working = true;
+ r = rs400_startup(rdev);
+ if (r) {
+		/* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 0000000..6d8bac5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION 0x000148
+#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000148_MC_FB_START 0xFFFF0000
+#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000148_MC_FB_TOP 0x0000FFFF
+#define R_00015C_NB_TOM 0x00015C
+#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00015C_MC_FB_START 0xFFFF0000
+#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00015C_MC_FB_TOP 0x0000FFFF
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 0000000..670b555
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,1078 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to the RS600, the IGP of the
+ * X1250/X1270 family paired with Intel CPUs (RS690/RS740 is the
+ * X1250/X1270 variant paired with AMD CPUs). The display engine is the
+ * avivo one, the BIOS is an atombios, and the 3D blocks are those of
+ * the R4XX family. The GART differs from the RS400 one and is very
+ * close to that of the R600 family (R600 likely being an evolution of
+ * the RS600 GART block).
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs600d.h"
+
+#include "rs600_reg_safe.h"
+
+static void rs600_gpu_init(struct radeon_device *rdev);
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+ unsigned i = 0;
+
+ if (crtc >= rdev->num_crtc)
+ return;
+
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
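+	/* then wait for the next vblank to actually begin, again bailing
+	 * out if the position counter stops moving
+	 */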
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+}
+
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
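+/* Apply the dynamic power management settings for the requested power
+ * state: voltage (GPIO or VDDC), reduced-speed SCLK dividers, HDP and
+ * backbias clock gating, and finally the PCIe lane count. */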
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ } else if (voltage->type == VOLTAGE_VDDC)
+ radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
+
+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+ }
+ } else {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+ }
+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+ } else
+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ hdp_dyn_cntl &= ~HDP_FORCEON;
+ else
+ hdp_dyn_cntl |= HDP_FORCEON;
+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+ /* mc_host_dyn seems to cause hangs from time to time */
+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+ else
+ mc_host_dyn_cntl |= MC_HOST_FORCEON;
+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+ else
+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->pm.set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+ if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+ if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = rs600_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ else
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ else
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned enable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+ break;
+ default:
+ break;
+ }
+ enable |= 1 << radeon_connector->hpd.hpd;
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ }
+ radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ unsigned disable = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+ break;
+ default:
+ break;
+ }
+ disable |= 1 << radeon_connector->hpd.hpd;
+ }
+ radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
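+/* Soft reset: quiesce the MC clients and CP, then pulse RBBM_SOFT_RESET
+ * for the GA/VAP, CP and MC blocks in turn, logging RBBM_STATUS after
+ * each pulse, and finally restore PCI state and bus mastering. */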
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+ u32 status, tmp;
+ int ret = 0;
+
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ pci_clear_master(rdev->pdev);
+ mdelay(1);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ ret = -1;
+ } else
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ rv515_mc_resume(rdev, &save);
+ return ret;
+}
+
+/*
+ * GART.
+ */
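+/* The TLB flush below is a write-toggle on MC_PT0_CNTL: clear the
+ * invalidate bits, set them to trigger the flush, clear them again,
+ * and do a final read back so the posted writes are known to have
+ * landed. */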
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+}
+
+static int rs600_gart_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.robj) {
+ WARN(1, "RS600 GART already initialized\n");
+ return 0;
+ }
+ /* Initialize common gart structure */
+ r = radeon_gart_init(rdev);
+ if (r) {
+ return r;
+ }
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+ return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int rs600_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Enable bus master */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+ /* FIXME: setup default page */
+ WREG32_MC(R_000100_MC_PT0_CNTL,
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
+ for (i = 0; i < 19; i++) {
+ WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
+ }
+ /* enable first context */
+ WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
+ /* disable all other contexts */
+ for (i = 1; i < 8; i++)
+ WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+ /* setup the page table */
+ WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+ rdev->gart.table_addr);
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+ WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+ /* System context maps to VRAM space */
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
+ /* enable page tables */
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+ tmp = RREG32_MC(R_000009_MC_CNTL1);
+ WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
+ rs600_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
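+
+/*
+ * The context0 setup above uses a single flat (one-level) page table:
+ * FLAT_BASE_ADDR points at the table in VRAM while FLAT_START/FLAT_END
+ * bound the GTT aperture it translates, so (assuming 4KB GPU pages and
+ * the 8-byte entries implied by table_size in rs600_gart_init()) entry i
+ * maps GPU address gtt_start + i * 4096.
+ */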
+
+static void rs600_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* FIXME: disable out-of-GART access */
+ WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+ tmp = RREG32_MC(R_000009_MC_CNTL1);
+ WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rs600_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ rs600_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+}
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+
+ if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ writeq(addr, ptr + (i * 8));
+ return 0;
+}
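+
+/*
+ * Illustrative PTE encoding (values picked for the example): a snooped
+ * system page at bus address 0x12345000 is stored as
+ *   0x12345000 | VALID | SYSTEM | SNOOPED | READABLE | WRITEABLE
+ *   = 0x0000000012345067
+ * in the 64-bit entry at gart.ptr + i * 8.
+ */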
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+ uint32_t mode_int = 0;
+ u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+ ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+ ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ u32 hdmi0;
+ if (ASIC_IS_DCE2(rdev))
+ hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ else
+ hdmi0 = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ tmp |= S_000040_SW_INT_EN(1);
+ }
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
+ mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
+ }
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
+ mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
+ }
+ if (rdev->irq.hpd[0]) {
+ hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ }
+ if (rdev->irq.hpd[1]) {
+ hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ }
+ if (rdev->irq.afmt[0]) {
+ hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ }
+ WREG32(R_000040_GEN_INT_CNTL, tmp);
+ WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ if (ASIC_IS_DCE2(rdev))
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ return 0;
+}
+
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
+{
+ uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+ uint32_t irq_mask = S_000044_SW_INT(1);
+ u32 tmp;
+
+ if (G_000044_DISPLAY_INT_STAT(irqs)) {
+ rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ WREG32(R_006534_D1MODE_VBLANK_STATUS,
+ S_006534_D1MODE_VBLANK_ACK(1));
+ }
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+ S_006D34_D2MODE_VBLANK_ACK(1));
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
+ } else {
+ rdev->irq.stat_regs.r500.disp_int = 0;
+ }
+
+ if (ASIC_IS_DCE2(rdev)) {
+ rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+ S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else
+ rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
+ if (irqs) {
+ WREG32(R_000044_GEN_INT_STATUS, irqs);
+ }
+ return irqs & irq_mask;
+}
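+
+/*
+ * Interrupt sources here appear to be acknowledged write-one-to-clear:
+ * writing the status bits back (the WREG32 of GEN_INT_STATUS above, the
+ * *_INT_ACK bits for HPD and HDMI) clears them, so a repeated
+ * rs600_irq_ack() returns 0 until a new interrupt fires -- which is what
+ * terminates the loop in rs600_irq_process() below.
+ */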
+
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+ u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ WREG32(R_006540_DxMODE_INT_MASK, 0);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ rs600_irq_ack(rdev);
+}
+
+int rs600_irq_process(struct radeon_device *rdev)
+{
+ u32 status, msi_rearm;
+ bool queue_hotplug = false;
+ bool queue_hdmi = false;
+
+ status = rs600_irq_ack(rdev);
+ if (!status &&
+ !rdev->irq.stat_regs.r500.disp_int &&
+ !rdev->irq.stat_regs.r500.hdmi0_status) {
+ return IRQ_NONE;
+ }
+ while (status ||
+ rdev->irq.stat_regs.r500.disp_int ||
+ rdev->irq.stat_regs.r500.hdmi0_status) {
+ /* SW interrupt */
+ if (G_000044_SW_INT(status)) {
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ }
+ /* Vertical blank interrupts */
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ }
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ queue_hdmi = true;
+ DRM_DEBUG("HDMI0\n");
+ }
+ status = rs600_irq_ack(rdev);
+ }
+ if (queue_hotplug)
+ schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+ WREG32(RADEON_BUS_CNTL, msi_rearm);
+ WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+ break;
+ default:
+ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+ break;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0)
+ return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
+ else
+ return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+static void rs600_gpu_init(struct radeon_device *rdev)
+{
+ r420_pipes_init(rdev);
+ /* Wait for mc idle */
+ if (rs600_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+}
+
+static void rs600_mc_init(struct radeon_device *rdev)
+{
+ u64 base;
+
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ rdev->mc.vram_is_ddr = true;
+ rdev->mc.vram_width = 128;
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ base = RREG32_MC(R_000004_MC_FB_LOCATION);
+ base = G_000004_MC_FB_START(base) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
+void rs600_bandwidth_update(struct radeon_device *rdev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ /* FIXME: implement full support */
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+ d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ }
+}
+
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+ S_000070_MC_IND_CITF_ARB0(1));
+ return RREG32(R_000074_MC_IND_DATA);
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+ S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+ WREG32(R_000074_MC_IND_DATA, v);
+}
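+
+/*
+ * Usage note: the two helpers above implement the indirect MC register
+ * access used by the RREG32_MC()/WREG32_MC() calls throughout this file;
+ * e.g. reading MC_FB_LOCATION amounts to
+ *   WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+ *          S_000070_MC_IND_CITF_ARB0(1));
+ *   val = RREG32(R_000074_MC_IND_DATA);
+ */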
+
+static void rs600_debugfs(struct radeon_device *rdev)
+{
+ if (r100_debugfs_rbbm_init(rdev))
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+}
+
+void rs600_set_safe_registers(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (rs600_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+
+ /* FIXME: what does AGP mean for this chipset? */
+ WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32_MC(R_000006_AGP_BASE, 0);
+ WREG32_MC(R_000007_AGP_BASE_2, 0);
+ /* Program MC */
+ WREG32_MC(R_000004_MC_FB_LOCATION,
+ S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+ rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rs600_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs600_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs600_gart_enable(rdev);
+ if (r)
+ return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
+ return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Make sure the GART is not active */
+ rs600_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = rs600_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ rs600_irq_disable(rdev);
+ rs600_gart_disable(rdev);
+ return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs600_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ rv515_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+ dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check whether the card is posted */
+ if (!radeon_boot_test_post_card(rdev))
+ return -EINVAL;
+
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize memory controller */
+ rs600_mc_init(rdev);
+ rs600_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ r = rs600_gart_init(rdev);
+ if (r)
+ return r;
+ rs600_set_safe_registers(rdev);
+
+ rdev->accel_working = true;
+ r = rs600_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ rs600_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 0000000..f1f8941
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+/* Registers */
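+/*
+ * Field accessor convention used throughout this header:
+ *   S_<reg>_<FIELD>(x)  shifts x into the field's bit position ("set")
+ *   G_<reg>_<FIELD>(v)  extracts the field from a register value ("get")
+ *   C_<reg>_<FIELD>     is the AND-mask that clears the field
+ * A typical read-modify-write therefore looks like:
+ *   tmp = RREG32(R_000040_GEN_INT_CNTL);
+ *   tmp = (tmp & C_000040_SW_INT_EN) | S_000040_SW_INT_EN(1);
+ *   WREG32(R_000040_GEN_INT_CNTL, tmp);
+ */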
+#define R_000040_GEN_INT_CNTL 0x000040
+#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
+#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
+#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
+#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
+#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
+#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
+#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
+#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
+#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
+#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
+#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
+#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
+#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
+#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
+#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
+#define C_000040_I2C_INT_EN 0xFFFDFFFF
+#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE 0xFFF7FFFF
+#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
+#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
+#define C_000040_VIPH_INT_EN 0xFEFFFFFF
+#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
+#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
+#define C_000040_SW_INT_EN 0xFDFFFFFF
+#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
+#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
+#define C_000040_GEYSERVILLE 0xF7FFFFFF
+#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
+#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
+#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
+#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
+#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
+#define C_000040_DVI_I2C_INT 0xDFFFFFFF
+#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
+#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
+#define C_000040_GUIDMA 0xBFFFFFFF
+#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
+#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
+#define C_000040_VIDDMA 0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS 0x000044
+#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0)
+#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1)
+#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE
+#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1)
+#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1)
+#define C_000044_VGA_INT_STAT 0xFFFFFFFD
+#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8)
+#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1)
+#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF
+#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12)
+#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1)
+#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF
+#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13)
+#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1)
+#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF
+#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14)
+#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1)
+#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF
+#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15)
+#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1)
+#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF
+#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16)
+#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1)
+#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF
+#define S_000044_I2C_INT(x) (((x) & 0x1) << 17)
+#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1)
+#define C_000044_I2C_INT 0xFFFDFFFF
+#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18)
+#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1)
+#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF
+#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19)
+#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1)
+#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF
+#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20)
+#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1)
+#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF
+#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21)
+#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1)
+#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF
+#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22)
+#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1)
+#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF
+#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23)
+#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1)
+#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF
+#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24)
+#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1)
+#define C_000044_VIPH_INT 0xFEFFFFFF
+#define S_000044_SW_INT(x) (((x) & 0x1) << 25)
+#define G_000044_SW_INT(x) (((x) >> 25) & 0x1)
+#define C_000044_SW_INT 0xFDFFFFFF
+#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26)
+#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1)
+#define C_000044_SW_INT_SET 0xFBFFFFFF
+#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27)
+#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1)
+#define C_000044_IDCT_INT_STAT 0xF7FFFFFF
+#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30)
+#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1)
+#define C_000044_GUIDMA_STAT 0xBFFFFFFF
+#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31)
+#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1)
+#define C_000044_VIDDMA_STAT 0x7FFFFFFF
+#define R_00004C_BUS_CNTL 0x00004C
+#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14)
+#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1)
+#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF
+#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20)
+#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1)
+#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF
+#define R_000070_MC_IND_INDEX 0x000070
+#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0)
+#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF)
+#define C_000070_MC_IND_ADDR 0xFFFF0000
+#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16)
+#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF
+#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17)
+#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF
+#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18)
+#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF
+#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19)
+#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF
+#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20)
+#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1)
+#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF
+#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21)
+#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1)
+#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF
+#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22)
+#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1)
+#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF
+#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23)
+#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1)
+#define C_000070_MC_IND_WR_EN 0xFF7FFFFF
+#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24)
+#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1)
+#define C_000070_MC_IND_RD_INV 0xFEFFFFFF
+#define R_000074_MC_IND_DATA 0x000074
+#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4
+#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
+#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
+#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS 0x006534
+#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
+#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
+#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE
+#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
+#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
+#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF
+#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
+#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
+#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF
+#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK 0x006540
+#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
+#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1)
+#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE
+#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4)
+#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1)
+#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF
+#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)
+#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1)
+#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF
+#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12)
+#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1)
+#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF
+#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30)
+#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1)
+#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF
+#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31)
+#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1)
+#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4
+#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
+#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
+#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34
+#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
+#define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
+#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE
+#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
+#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
+#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF
+#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
+#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
+#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF
+#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC
+#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4)
+#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1)
+#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF
+#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
+#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
+#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
+#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
+#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
+#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
+#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
+#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
+#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
+#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
+#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007838_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
+#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
+#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007A38_DACB_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
+#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
+#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
+#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
+#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
+#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
+#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
+#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
+#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
+#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
+#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
+#define R_007404_HDMI0_STATUS 0x007404
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
+
+/* MC registers */
+#define R_000000_MC_STATUS 0x000000
+#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0)
+#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1)
+#define C_000000_MC_IDLE 0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION 0x000004
+#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000004_MC_FB_START 0xFFFF0000
+#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000004_MC_FB_TOP 0x0000FFFF
+#define R_000005_MC_AGP_LOCATION 0x000005
+#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000005_MC_AGP_START 0xFFFF0000
+#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000005_MC_AGP_TOP 0x0000FFFF
+#define R_000006_AGP_BASE 0x000006
+#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000006_AGP_BASE_ADDR 0x00000000
+#define R_000007_AGP_BASE_2 0x000007
+#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
+#define R_000009_MC_CNTL1 0x000009
+#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26)
+#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1)
+#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF
+/* FIXME: the various field sizes are unknown; need feedback from AMD */
+#define R_000100_MC_PT0_CNTL 0x000100
+#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0)
+#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1)
+#define C_000100_ENABLE_PT 0xFFFFFFFE
+#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15)
+#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7)
+#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF
+#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21)
+#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7)
+#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF
+#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
+#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1)
+#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF
+#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29)
+#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1)
+#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102
+#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0)
+#define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1)
+#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE
+#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1)
+#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3)
+#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9
+#define V_000102_PAGE_TABLE_FLAT 0
+/* R600 documentation suggests that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C
+#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE
+#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1)
+#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1)
+#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD
+#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8)
+#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3)
+#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF
+#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0
+#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1
+#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2
+#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3
+#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10)
+#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1)
+#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF
+#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0
+#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11)
+#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7)
+#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF
+#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14)
+#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1)
+#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF
+#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15)
+#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7)
+#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF
+#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20)
+#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
+#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
+
+#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
+#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
+#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
+#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
+#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+
+/* PLL regs */
+#define GENERAL_PWRMGT 0x8
+#define GLOBAL_PWRMGT_EN (1 << 0)
+#define MOBILE_SU (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH 0xc
+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
+#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
+#define STATIC_SCREEN_HILEN(x) ((x) << 24)
+#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
+#define DYN_SCLK_VOL_CNTL 0xe
+#define IO_CG_VOLTAGE_DROP (1 << 0)
+#define VOLTAGE_DROP_SYNC (1 << 2)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
+#define HDP_DYN_CNTL 0x10
+#define HDP_FORCEON (1 << 0)
+#define MC_HOST_DYN_CNTL 0x1e
+#define MC_HOST_FORCEON (1 << 0)
+#define DYN_BACKBIAS_CNTL 0x29
+#define IO_CG_BACKBIAS_EN (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
+#define PWRDN_WAIT_BUSY_OFF (1 << 0)
+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
+#define PWRDN_WAIT_PPLL_OFF (1 << 8)
+#define PWRUP_WAIT_PPLL_ON (1 << 12)
+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
+#define PM_ASSERT_RESET (1 << 20)
+#define PM_PWRDN_PPLL (1 << 24)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 0000000..55880d5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs690d.h"
+
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+ if (G_000090_MC_SYSTEM_IDLE(tmp))
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
+
+static void rs690_gpu_init(struct radeon_device *rdev)
+{
+ /* FIXME: is this correct? */
+ r420_pipes_init(rdev);
+ if (rs690_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
+void rs690_pm_info(struct radeon_device *rdev)
+{
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *info;
+ uint16_t data_offset;
+ uint8_t frev, crev;
+ fixed20_12 tmp;
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+ /* Get various system information from the BIOS */
+ switch (crev) {
+ case 1:
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ if (le16_to_cpu(info->info.usK8MemoryClock))
+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ else if (rdev->clock.default_mclk) {
+ rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ } else
+ rdev->pm.igp_system_mclk.full = dfixed_const(400);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
+ break;
+ case 2:
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+ rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
+ else if (rdev->clock.default_mclk)
+ rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+ else
+ rdev->pm.igp_system_mclk.full = dfixed_const(66700);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ break;
+ default:
+ /* We assume the slowest possible clock, i.e. the worst case */
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+ rdev->pm.igp_system_mclk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+ DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+ break;
+ }
+ } else {
+ /* We assume the slowest possible clock, i.e. the worst case */
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+ rdev->pm.igp_system_mclk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+ DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+ }
+ /* Compute various bandwidth */
+ /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
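+ /* e.g. a 400 MHz system mclk gives 400 * 4 = 1600 here */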
+ tmp.full = dfixed_const(4);
+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
+ /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
+ * = ht_clk * ht_width / 5
+ */
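+ /* e.g. a 1000 MHz HT link, 8 bits wide: 1000 * 8 / 5 = 1600 */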
+ tmp.full = dfixed_const(5);
+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
+ rdev->pm.igp_ht_link_width);
+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
+ if (rdev->pm.ht_bandwidth.full < rdev->pm.max_bandwidth.full) {
+ /* HT link is a limiting factor */
+ rdev->pm.max_bandwidth.full = rdev->pm.ht_bandwidth.full;
+ }
+ /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
+ * = (sideport_clk * 14) / 10
+ */
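+ /* e.g. a 400 MHz sideport clock: (400 * 14) / 10 = 560 */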
+ tmp.full = dfixed_const(14);
+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+ tmp.full = dfixed_const(10);
+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
+}
+
+static void rs690_mc_init(struct radeon_device *rdev)
+{
+ u64 base;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
+
+ rs400_gart_adjust_size(rdev);
+ rdev->mc.vram_is_ddr = true;
+ rdev->mc.vram_width = 128;
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB is only usable with UMA memory, so it is simply
+ * disabled whenever sideport memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+
+ rs690_pm_info(rdev);
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode1,
+ struct drm_display_mode *mode2)
+{
+ u32 tmp;
+
+ /*
+ * Line Buffer Setup
+ * There is a single line buffer shared by both display controllers.
+ * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * the display controllers. The partitioning can either be done
+ * manually or via one of four preset allocations specified in bits 1:0:
+ * 0 - line buffer is divided in half and shared between crtc
+ * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+ * 2 - D1 gets the whole buffer
+ * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+ * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
+ * allocation mode. In manual allocation mode, D1 always starts at 0,
+ * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
+ */
+ tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+ tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
+ /* auto */
+ if (mode1 && mode2) {
+ if (mode1->hdisplay > mode2->hdisplay) {
+ if (mode1->hdisplay > 2560)
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+ else
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else if (mode2->hdisplay > mode1->hdisplay) {
+ if (mode2->hdisplay > 2560)
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ else
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ } else if (mode1) {
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
+ } else if (mode2) {
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ }
+ WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+}
+
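+/* Per-CRTC watermark state; the fixed20_12 members are 20.12
+ * fixed-point values handled with the dfixed_* helpers.
+ */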
+struct rs690_watermark {
+ u32 lb_request_fifo_depth;
+ fixed20_12 num_line_pair;
+ fixed20_12 estimated_width;
+ fixed20_12 worst_case_latency;
+ fixed20_12 consumption_rate;
+ fixed20_12 active_time;
+ fixed20_12 dbpp;
+ fixed20_12 priority_mark_max;
+ fixed20_12 priority_mark;
+ fixed20_12 sclk;
+};
+
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+ struct radeon_crtc *crtc,
+ struct rs690_watermark *wm)
+{
+ struct drm_display_mode *mode = &crtc->base.mode;
+ fixed20_12 a, b, c;
+ fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+ fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+ if (!crtc->base.enabled) {
+ /* FIXME: wouldn't it be better to set the priority mark to maximum? */
+ wm->lb_request_fifo_depth = 4;
+ return;
+ }
+
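+ /* request two line pairs when downscaling more than 2:1 vertically */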
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
+ else
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
+ wm->lb_request_fifo_depth = 4;
+ } else {
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+ }
+
+ /* Determine consumption rate
+ * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+ * vtaps = number of vertical taps,
+ * vsc = vertical scaling ratio, defined as source/destination
+ * hsc = horizontal scaling ratio, defined as source/destination
+ */
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
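+ /* e.g. mode->clock == 108000 (kHz) gives pclk = 1000 / 108 ~= 9.26 ns */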
+ if (crtc->rmx_type != RMX_OFF) {
+ b.full = dfixed_const(2);
+ if (crtc->vsc.full > b.full)
+ b.full = crtc->vsc.full;
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
+ } else {
+ consumption_time.full = pclk.full;
+ }
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
+
+
+ /* Determine line time
+ * LineTime = total time for one line of display
+ * htotal = total number of horizontal pixels
+ * pclk = pixel clock period(ns)
+ */
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
+
+ /* Determine active time
+ * ActiveTime = time of active region of display within one line,
+ * hactive = total number of horizontal active pixels
+ * htotal = total number of horizontal pixels
+ */
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
+
+ /* Maximum bandwidth is the minimum bandwidth of all components */
+ rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
+ if (rdev->mc.igp_sideport_enabled) {
+ if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+ rdev->pm.sideport_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+ read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
+ rdev->pm.igp_sideport_mclk);
+ } else {
+ if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+ rdev->pm.k8_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
+ if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
+ rdev->pm.ht_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
+ read_delay_latency.full = dfixed_const(5000);
+ }
+
+ /* sclk = system clock(ns) = 1000 / max_bandwidth / 16 */
+ a.full = dfixed_const(16);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+ a.full = dfixed_const(1000);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
+ /* Determine chunk time
+ * ChunkTime = the time it takes the DCP to send one chunk of data
+ * to the LB which consists of pipeline delay and inter chunk gap
+ * sclk = system clock(ns)
+ */
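+ /* a chunk is 256 pixels; the 13/10 factor below pads ~30% for
+ * that pipeline delay and inter chunk gap
+ */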
+ a.full = dfixed_const(256 * 13);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+ a.full = dfixed_const(10);
+ chunk_time.full = dfixed_div(chunk_time, a);
+
+ /* Determine the worst case latency
+ * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+ * WorstCaseLatency = worst case time from urgent to when the MC starts
+ * to return data
+ * READ_DELAY_IDLE_MAX = constant of 1us
+ * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+ * which consists of pipeline delay and inter chunk gap
+ */
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+ wm->worst_case_latency.full += read_delay_latency.full;
+ } else {
+ a.full = dfixed_const(2);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+ wm->worst_case_latency.full += read_delay_latency.full;
+ }
+
+ /* Determine the tolerable latency
+ * TolerableLatency = Any given request has only 1 line time
+ * for the data to be returned
+ * LBRequestFifoDepth = Number of chunk requests the LB can
+ * put into the request FIFO for a display
+ * LineTime = total time for one line of display
+ * ChunkTime = the time it takes the DCP to send one chunk
+ * of data to the LB which consists of
+ * pipeline delay and inter chunk gap
+ */
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+ tolerable_latency.full = line_time.full;
+ } else {
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = line_time.full - tolerable_latency.full;
+ }
+ /* We assume worst case 32bits (4 bytes) */
+ wm->dbpp.full = dfixed_const(4 * 8);
+
+ /* Determine the maximum priority mark
+ * width = viewport width in pixels
+ */
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+ /* Determine estimated width */
+ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = dfixed_const(10);
+ } else {
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+ wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+ }
+}
+
+void rs690_bandwidth_update(struct radeon_device *rdev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ struct rs690_watermark wm0;
+ struct rs690_watermark wm1;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ fixed20_12 priority_mark02, priority_mark12, fill_rate;
+ fixed20_12 a, b;
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+ /*
+ * Set display0/1 priority up in the memory controller for the
+ * active modes if the user specifies HIGH for the displaypriority
+ * option.
+ */
+ if ((rdev->disp_priority == 2) &&
+ ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+ tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+ tmp &= C_000104_MC_DISP0R_INIT_LAT;
+ tmp &= C_000104_MC_DISP1R_INIT_LAT;
+ if (mode0)
+ tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+ if (mode1)
+ tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+ WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+ }
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+ WREG32(R_006C9C_DCP_CONTROL, 0);
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ WREG32(R_006C9C_DCP_CONTROL, 2);
+
+ rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+ rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
+ tmp = (wm0.lb_request_fifo_depth - 1);
+ tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
+ WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
+
+ if (mode0 && mode1) {
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ else
+ a.full = wm0.num_line_pair.full;
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ else
+ b.full = wm1.num_line_pair.full;
+ a.full += b.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ a.full = a.full + b.full;
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
+ } else {
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
+ }
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ a.full = a.full + b.full;
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ } else {
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ }
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (dfixed_trunc(priority_mark02) < 0)
+ priority_mark02.full = 0;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (dfixed_trunc(priority_mark12) < 0)
+ priority_mark12.full = 0;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ }
+ } else if (mode0) {
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ else
+ a.full = wm0.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ a.full = a.full + b.full;
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
+ } else {
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
+ }
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (dfixed_trunc(priority_mark02) < 0)
+ priority_mark02.full = 0;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ } else if (mode1) {
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ else
+ a.full = wm1.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm1.sclk, a);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ a.full = a.full + b.full;
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ } else {
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ }
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (dfixed_trunc(priority_mark12) < 0)
+ priority_mark12.full = 0;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ }
+
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
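+/* MC registers live behind an index/data pair: the register address
+ * is written to MC_INDEX (with a write-enable bit set for stores) and
+ * the payload is transferred through MC_DATA.
+ */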
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+ r = RREG32(R_00007C_MC_DATA);
+ WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+ return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+ S_000078_MC_IND_WR_EN(1));
+ WREG32(R_00007C_MC_DATA, v);
+ WREG32(R_000078_MC_INDEX, 0x7F);
+}
+
+static void rs690_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (rs690_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ /* Program the MC; the address space should be limited to 32 bits */
+ WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+ S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+ rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rs690_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs690_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs400_gart_enable(rdev);
+ if (r)
+ return r;
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
+ return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Make sure the GART is disabled */
+ rs400_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = rs690_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ rs600_irq_disable(rdev);
+ rs400_gart_disable(rdev);
+ return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ rv515_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* restore some register to sane defaults */
+ r100_restore_sanity(rdev);
+ /* TODO: disabling VGA properly needs to use a VGA request */
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+ dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize memory controller */
+ rs690_mc_init(rdev);
+ rv515_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ r = rs400_gart_init(rdev);
+ if (r)
+ return r;
+ rs600_set_safe_registers(rdev);
+
+ rdev->accel_working = true;
+ r = rs690_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init, so stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 0000000..8af3ccf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+/* Registers */
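+/* As in the other *d.h headers, S_xxx(v) shifts a field value into
+ * place, G_xxx(r) extracts it from a register word, and C_xxx is the
+ * AND mask that clears the field.
+ */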
+#define R_00001E_K8_FB_LOCATION 0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL 0x00005F
+#define G_00005F_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
+#define R_000078_MC_INDEX 0x000078
+#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
+#define C_000078_MC_IND_ADDR 0xFFFFFE00
+#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1)
+#define C_000078_MC_IND_WR_EN 0xFFFFFDFF
+#define R_00007C_MC_DATA 0x00007C
+#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_00007C_MC_DATA 0x00000000
+#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
+#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0000F8_CONFIG_MEMSIZE 0x00000000
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT 0x006520
+#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0)
+#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3)
+#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC
+#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2)
+#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1)
+#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB
+#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
+#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4)
+#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF)
+#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
+#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
+#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL 0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
+#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
+#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58
+#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0)
+#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF)
+#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0
+#define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16)
+#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF)
+#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF
+
+#define R_000090_MC_SYSTEM_STATUS 0x000090
+#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0)
+#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1)
+#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE
+#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1)
+#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1)
+#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD
+#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2)
+#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1)
+#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB
+#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3)
+#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1)
+#define C_000090_MC_SELECT_PM 0xFFFFFFF7
+#define S_000090_RESERVED4(x) (((x) & 0xF) << 4)
+#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF)
+#define C_000090_RESERVED4 0xFFFFFF0F
+#define S_000090_RESERVED8(x) (((x) & 0xF) << 8)
+#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF)
+#define C_000090_RESERVED8 0xFFFFF0FF
+#define S_000090_RESERVED12(x) (((x) & 0xF) << 12)
+#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF)
+#define C_000090_RESERVED12 0xFFFF0FFF
+#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16)
+#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1)
+#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF
+#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17)
+#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1)
+#define C_000090_MCA_IDLE 0xFFFDFFFF
+#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18)
+#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1)
+#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF
+#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19)
+#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1)
+#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF
+#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20)
+#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF)
+#define C_000090_RESERVED20 0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION 0x000100
+#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000100_MC_FB_START 0xFFFF0000
+#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000100_MC_FB_TOP 0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104
+#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0)
+#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF)
+#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0
+#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4)
+#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF)
+#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F
+#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8)
+#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF)
+#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF
+#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12)
+#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF)
+#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF
+#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16)
+#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF)
+#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF
+#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20)
+#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF)
+#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF
+#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24)
+#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF)
+#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF
+#define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28)
+#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF)
+#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 0000000..c5b3983
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#define R_00015C_AGP_BASE_2 0x00015C
+#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 0000000..e5a70b0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#define R_00000D_SCLK_CNTL_M6 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
+#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
+#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
+#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
+#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
+#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
+#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
+#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
+#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
+#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
+#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
+#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
+#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
+#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
+#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
+#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
+#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
+#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
+#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
+#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
+#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
+#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
+#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
+#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
+#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
+#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
+#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
+#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
+#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
+#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
+#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
+#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
+#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
+#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
+#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
+#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
+#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
+#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
+#define C_00000D_FORCE_DISP2 0xFFFF7FFF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP1 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
+#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
+#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
+#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
+#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
+#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
+#define C_00000D_FORCE_OV0 0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 0000000..c75c5ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL 0x00000D */
+#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_VAP 0xFFDFFFFF
+#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_SR 0xFDFFFFFF
+#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_PX 0xFBFFFFFF
+#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TX 0xF7FFFFFF
+#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_US 0xEFFFFFFF
+#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SU 0xBFFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 0000000..21c7d7b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,1257 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "rv515d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rv515_reg_safe.h"
+
+/* This file gathers functions specific to rv515 */
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
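+/* crtc_offsets[i] is added to a D1 register offset to reach the same
+ * register in CRTC i's block
+ */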
+
+void rv515_debugfs(struct radeon_device *rdev)
+{
+ if (r100_debugfs_rbbm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+ }
+ if (rv515_debugfs_pipes_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for pipes !\n");
+ }
+ if (rv515_debugfs_ga_info_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for GA !\n");
+ }
+}
+
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ r = radeon_ring_lock(rdev, ring, 64);
+ if (r) {
+ return;
+ }
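+ /* PACKET0(reg, n) emits a type-0 CP packet that writes n+1 data
+ * dwords to consecutive registers starting at reg
+ */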
+ radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
+ ISYNC_ANY2D_IDLE3D |
+ ISYNC_ANY3D_IDLE2D |
+ ISYNC_WAIT_IDLEGUI |
+ ISYNC_CPSCRATCH_IDLEGUI);
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+ radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+ radeon_ring_write(ring,
+ ((6 << MS_X0_SHIFT) |
+ (6 << MS_Y0_SHIFT) |
+ (6 << MS_X1_SHIFT) |
+ (6 << MS_Y1_SHIFT) |
+ (6 << MS_X2_SHIFT) |
+ (6 << MS_Y2_SHIFT) |
+ (6 << MSBD0_Y_SHIFT) |
+ (6 << MSBD0_X_SHIFT)));
+ radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+ radeon_ring_write(ring,
+ ((6 << MS_X3_SHIFT) |
+ (6 << MS_Y3_SHIFT) |
+ (6 << MS_X4_SHIFT) |
+ (6 << MS_Y4_SHIFT) |
+ (6 << MS_X5_SHIFT) |
+ (6 << MS_Y5_SHIFT) |
+ (6 << MSBD1_SHIFT)));
+ radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+ radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+ radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+ radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+ radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+ radeon_ring_write(ring, PACKET0(0x20C8, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32_MC(MC_STATUS);
+ if (tmp & MC_STATUS_IDLE) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
+void rv515_vga_render_disable(struct radeon_device *rdev)
+{
+ WREG32(R_000300_VGA_RENDER_CONTROL,
+ RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void rv515_gpu_init(struct radeon_device *rdev)
+{
+ unsigned pipe_select_current, gb_pipe_select, tmp;
+
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "resetting GPU. Bad things might happen.\n");
+ }
+ rv515_vga_render_disable(rdev);
+ r420_pipes_init(rdev);
+ gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ pipe_select_current = (tmp >> 2) & 3;
+ tmp = (1 << pipe_select_current) |
+ (((gb_pipe_select >> 8) & 0xF) << 4);
+ WREG32_PLL(0x000D, tmp);
+ if (r100_gui_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait GUI idle while "
+ "resetting GPU. Bad things might happen.\n");
+ }
+ if (rv515_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "Failed to wait MC idle while "
+ "programming pipes. Bad things might happen.\n");
+ }
+}
+
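+/* Derive the VRAM bus width from the MC channel configuration: a single
+ * channel means a 64-bit bus, anything else is treated as 128-bit. */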
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+
+ rdev->mc.vram_width = 128;
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0:
+ rdev->mc.vram_width = 64;
+ break;
+ case 1:
+ rdev->mc.vram_width = 128;
+ break;
+ default:
+ rdev->mc.vram_width = 128;
+ break;
+ }
+}
+
+static void rv515_mc_init(struct radeon_device *rdev)
+{
+ rv515_vram_get_type(rdev);
+ r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+}
+
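+/* MC registers are not memory mapped directly; they are reached through the
+ * MC_IND_INDEX/MC_IND_DATA pair: the index is written together with enable
+ * bits in the upper half of MC_IND_INDEX (writes use a wider enable mask),
+ * the data port is accessed, then the index register is cleared. */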
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+ r = RREG32(MC_IND_DATA);
+ WREG32(MC_IND_INDEX, 0);
+ return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+ WREG32(MC_IND_DATA, (v));
+ WREG32(MC_IND_INDEX, 0);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(GB_PIPE_SELECT);
+ seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+ tmp = RREG32(SU_REG_DEST);
+ seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+ tmp = RREG32(GB_TILE_CONFIG);
+ seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+ tmp = RREG32(DST_PIPE_CONFIG);
+ seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+ return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = RREG32(0x2140);
+ seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+ radeon_asic_reset(rdev);
+ tmp = RREG32(0x425C);
+ seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+ return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+ {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+ {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+ return 0;
+#endif
+}
+
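+/* Save VGA/CRTC state and stop all clients of the memory controller: blank
+ * every active CRTC, wait one frame for in-flight requests to drain, then
+ * (on R600 and newer) black out the MC and lock the double buffered
+ * registers so the framebuffer can be moved safely. */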
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+ u32 crtc_enabled, tmp, frame_count, blackout;
+ int i, j;
+
+ save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+
+ /* disable VGA render */
+ WREG32(R_000300_VGA_RENDER_CONTROL, 0);
+ /* blank the display controllers */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+ radeon_wait_for_vblank(rdev, i);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_EN;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+
+ radeon_mc_wait_for_idle(rdev);
+
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_RV770)
+ blackout = RREG32(R700_MC_CITF_CNTL);
+ else
+ blackout = RREG32(R600_CITF_CNTL);
+ if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+ /* Block CPU access */
+ WREG32(R600_BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout |= R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, blackout);
+ else
+ WREG32(R600_CITF_CNTL, blackout);
+ }
+ }
+ /* wait for the MC to settle */
+ udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
+}
+
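+/* Undo rv515_mc_stop(): point every CRTC and the VGA engine at the new VRAM
+ * base, unlock the double buffered registers, lift the MC blackout and
+ * re-enable the displays that were active. */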
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+ u32 tmp, frame_count;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->family >= CHIP_RV770) {
+ if (i == 0) {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ } else {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ upper_32_bits(rdev->mc.vram_start));
+ }
+ }
+ WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+ WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
+ if (rdev->family >= CHIP_R600) {
+ /* unblackout the MC */
+ if (rdev->family >= CHIP_RV770)
+ tmp = RREG32(R700_MC_CITF_CNTL);
+ else
+ tmp = RREG32(R600_CITF_CNTL);
+ tmp &= ~R600_BLACKOUT_MASK;
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R700_MC_CITF_CNTL, tmp);
+ else
+ WREG32(R600_CITF_CNTL, tmp);
+ /* allow CPU access */
+ WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+ }
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ /* wait for the next frame */
+ frame_count = radeon_get_vblank_counter(rdev, i);
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ if (radeon_get_vblank_counter(rdev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ /* Unlock vga access */
+ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void rv515_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+ /* Stop all MC clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for MC idle */
+ if (rv515_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ /* Write VRAM size in case we are limiting it */
+ WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+ /* Program the MC; the usable address space is limited to 32 bits */
+ WREG32_MC(R_000001_MC_FB_LOCATION,
+ S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32_MC(R_000002_MC_AGP_LOCATION,
+ S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+ S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+ WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ WREG32_MC(R_000004_MC_AGP_BASE_2,
+ S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+ } else {
+ WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
+ WREG32_MC(R_000003_MC_AGP_BASE, 0);
+ WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
+ }
+
+ rv515_mc_resume(rdev, &save);
+}
+
+void rv515_clock_startup(struct radeon_device *rdev)
+{
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_atom_set_clock_gating(rdev, 1);
+ /* We need to force on some of the blocks */
+ WREG32_PLL(R_00000F_CP_DYN_CNTL,
+ RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
+ WREG32_PLL(R_000011_E2_DYN_CNTL,
+ RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
+ WREG32_PLL(R_000013_IDCT_DYN_CNTL,
+ RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
+}
+
+static int rv515_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rv515_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rv515_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int rv515_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Make sure the GART is not active */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
+ r = rv515_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
+}
+
+int rv515_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ radeon_wb_disable(rdev);
+ rs600_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ return 0;
+}
+
+void rv515_set_safe_registers(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
+}
+
+void rv515_fini(struct radeon_device *rdev)
+{
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_gem_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rv515_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disable VGA; need to use VGA request */
+ /* restore some register to sane defaults */
+ r100_restore_sanity(rdev);
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+ dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+ if (radeon_asic_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if the card is posted */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ rv515_mc_init(rdev);
+ rv515_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ rv515_set_safe_registers(rdev);
+
+ rdev->accel_working = true;
+ r = rv515_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_cp_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
+
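+/* Load the TV scaler coefficient table through the indexed register pair at
+ * 0x6578/0x657c (index/data, per CRTC); the values written below are a
+ * fixed coefficient set. */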
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
+{
+ int index_reg = 0x6578 + crtc->crtc_offset;
+ int data_reg = 0x657c + crtc->crtc_offset;
+
+ WREG32(0x659C + crtc->crtc_offset, 0x0);
+ WREG32(0x6594 + crtc->crtc_offset, 0x705);
+ WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
+ WREG32(0x65D8 + crtc->crtc_offset, 0x0);
+ WREG32(0x65B0 + crtc->crtc_offset, 0x0);
+ WREG32(0x65C0 + crtc->crtc_offset, 0x0);
+ WREG32(0x65D4 + crtc->crtc_offset, 0x0);
+ WREG32(index_reg, 0x0);
+ WREG32(data_reg, 0x841880A8);
+ WREG32(index_reg, 0x1);
+ WREG32(data_reg, 0x84208680);
+ WREG32(index_reg, 0x2);
+ WREG32(data_reg, 0xBFF880B0);
+ WREG32(index_reg, 0x100);
+ WREG32(data_reg, 0x83D88088);
+ WREG32(index_reg, 0x101);
+ WREG32(data_reg, 0x84608680);
+ WREG32(index_reg, 0x102);
+ WREG32(data_reg, 0xBFF080D0);
+ WREG32(index_reg, 0x200);
+ WREG32(data_reg, 0x83988068);
+ WREG32(index_reg, 0x201);
+ WREG32(data_reg, 0x84A08680);
+ WREG32(index_reg, 0x202);
+ WREG32(data_reg, 0xBFF080F8);
+ WREG32(index_reg, 0x300);
+ WREG32(data_reg, 0x83588058);
+ WREG32(index_reg, 0x301);
+ WREG32(data_reg, 0x84E08660);
+ WREG32(index_reg, 0x302);
+ WREG32(data_reg, 0xBFF88120);
+ WREG32(index_reg, 0x400);
+ WREG32(data_reg, 0x83188040);
+ WREG32(index_reg, 0x401);
+ WREG32(data_reg, 0x85008660);
+ WREG32(index_reg, 0x402);
+ WREG32(data_reg, 0xBFF88150);
+ WREG32(index_reg, 0x500);
+ WREG32(data_reg, 0x82D88030);
+ WREG32(index_reg, 0x501);
+ WREG32(data_reg, 0x85408640);
+ WREG32(index_reg, 0x502);
+ WREG32(data_reg, 0xBFF88180);
+ WREG32(index_reg, 0x600);
+ WREG32(data_reg, 0x82A08018);
+ WREG32(index_reg, 0x601);
+ WREG32(data_reg, 0x85808620);
+ WREG32(index_reg, 0x602);
+ WREG32(data_reg, 0xBFF081B8);
+ WREG32(index_reg, 0x700);
+ WREG32(data_reg, 0x82608010);
+ WREG32(index_reg, 0x701);
+ WREG32(data_reg, 0x85A08600);
+ WREG32(index_reg, 0x702);
+ WREG32(data_reg, 0x800081F0);
+ WREG32(index_reg, 0x800);
+ WREG32(data_reg, 0x8228BFF8);
+ WREG32(index_reg, 0x801);
+ WREG32(data_reg, 0x85E085E0);
+ WREG32(index_reg, 0x802);
+ WREG32(data_reg, 0xBFF88228);
+ WREG32(index_reg, 0x10000);
+ WREG32(data_reg, 0x82A8BF00);
+ WREG32(index_reg, 0x10001);
+ WREG32(data_reg, 0x82A08CC0);
+ WREG32(index_reg, 0x10002);
+ WREG32(data_reg, 0x8008BEF8);
+ WREG32(index_reg, 0x10100);
+ WREG32(data_reg, 0x81F0BF28);
+ WREG32(index_reg, 0x10101);
+ WREG32(data_reg, 0x83608CA0);
+ WREG32(index_reg, 0x10102);
+ WREG32(data_reg, 0x8018BED0);
+ WREG32(index_reg, 0x10200);
+ WREG32(data_reg, 0x8148BF38);
+ WREG32(index_reg, 0x10201);
+ WREG32(data_reg, 0x84408C80);
+ WREG32(index_reg, 0x10202);
+ WREG32(data_reg, 0x8008BEB8);
+ WREG32(index_reg, 0x10300);
+ WREG32(data_reg, 0x80B0BF78);
+ WREG32(index_reg, 0x10301);
+ WREG32(data_reg, 0x85008C20);
+ WREG32(index_reg, 0x10302);
+ WREG32(data_reg, 0x8020BEA0);
+ WREG32(index_reg, 0x10400);
+ WREG32(data_reg, 0x8028BF90);
+ WREG32(index_reg, 0x10401);
+ WREG32(data_reg, 0x85E08BC0);
+ WREG32(index_reg, 0x10402);
+ WREG32(data_reg, 0x8018BE90);
+ WREG32(index_reg, 0x10500);
+ WREG32(data_reg, 0xBFB8BFB0);
+ WREG32(index_reg, 0x10501);
+ WREG32(data_reg, 0x86C08B40);
+ WREG32(index_reg, 0x10502);
+ WREG32(data_reg, 0x8010BE90);
+ WREG32(index_reg, 0x10600);
+ WREG32(data_reg, 0xBF58BFC8);
+ WREG32(index_reg, 0x10601);
+ WREG32(data_reg, 0x87A08AA0);
+ WREG32(index_reg, 0x10602);
+ WREG32(data_reg, 0x8010BE98);
+ WREG32(index_reg, 0x10700);
+ WREG32(data_reg, 0xBF10BFF0);
+ WREG32(index_reg, 0x10701);
+ WREG32(data_reg, 0x886089E0);
+ WREG32(index_reg, 0x10702);
+ WREG32(data_reg, 0x8018BEB0);
+ WREG32(index_reg, 0x10800);
+ WREG32(data_reg, 0xBED8BFE8);
+ WREG32(index_reg, 0x10801);
+ WREG32(data_reg, 0x89408940);
+ WREG32(index_reg, 0x10802);
+ WREG32(data_reg, 0xBFE8BED8);
+ WREG32(index_reg, 0x20000);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20001);
+ WREG32(data_reg, 0x90008000);
+ WREG32(index_reg, 0x20002);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20003);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20100);
+ WREG32(data_reg, 0x80108000);
+ WREG32(index_reg, 0x20101);
+ WREG32(data_reg, 0x8FE0BF70);
+ WREG32(index_reg, 0x20102);
+ WREG32(data_reg, 0xBFE880C0);
+ WREG32(index_reg, 0x20103);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20200);
+ WREG32(data_reg, 0x8018BFF8);
+ WREG32(index_reg, 0x20201);
+ WREG32(data_reg, 0x8F80BF08);
+ WREG32(index_reg, 0x20202);
+ WREG32(data_reg, 0xBFD081A0);
+ WREG32(index_reg, 0x20203);
+ WREG32(data_reg, 0xBFF88000);
+ WREG32(index_reg, 0x20300);
+ WREG32(data_reg, 0x80188000);
+ WREG32(index_reg, 0x20301);
+ WREG32(data_reg, 0x8EE0BEC0);
+ WREG32(index_reg, 0x20302);
+ WREG32(data_reg, 0xBFB082A0);
+ WREG32(index_reg, 0x20303);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20400);
+ WREG32(data_reg, 0x80188000);
+ WREG32(index_reg, 0x20401);
+ WREG32(data_reg, 0x8E00BEA0);
+ WREG32(index_reg, 0x20402);
+ WREG32(data_reg, 0xBF8883C0);
+ WREG32(index_reg, 0x20403);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x20500);
+ WREG32(data_reg, 0x80188000);
+ WREG32(index_reg, 0x20501);
+ WREG32(data_reg, 0x8D00BE90);
+ WREG32(index_reg, 0x20502);
+ WREG32(data_reg, 0xBF588500);
+ WREG32(index_reg, 0x20503);
+ WREG32(data_reg, 0x80008008);
+ WREG32(index_reg, 0x20600);
+ WREG32(data_reg, 0x80188000);
+ WREG32(index_reg, 0x20601);
+ WREG32(data_reg, 0x8BC0BE98);
+ WREG32(index_reg, 0x20602);
+ WREG32(data_reg, 0xBF308660);
+ WREG32(index_reg, 0x20603);
+ WREG32(data_reg, 0x80008008);
+ WREG32(index_reg, 0x20700);
+ WREG32(data_reg, 0x80108000);
+ WREG32(index_reg, 0x20701);
+ WREG32(data_reg, 0x8A80BEB0);
+ WREG32(index_reg, 0x20702);
+ WREG32(data_reg, 0xBF0087C0);
+ WREG32(index_reg, 0x20703);
+ WREG32(data_reg, 0x80008008);
+ WREG32(index_reg, 0x20800);
+ WREG32(data_reg, 0x80108000);
+ WREG32(index_reg, 0x20801);
+ WREG32(data_reg, 0x8920BED0);
+ WREG32(index_reg, 0x20802);
+ WREG32(data_reg, 0xBED08920);
+ WREG32(index_reg, 0x20803);
+ WREG32(data_reg, 0x80008010);
+ WREG32(index_reg, 0x30000);
+ WREG32(data_reg, 0x90008000);
+ WREG32(index_reg, 0x30001);
+ WREG32(data_reg, 0x80008000);
+ WREG32(index_reg, 0x30100);
+ WREG32(data_reg, 0x8FE0BF90);
+ WREG32(index_reg, 0x30101);
+ WREG32(data_reg, 0xBFF880A0);
+ WREG32(index_reg, 0x30200);
+ WREG32(data_reg, 0x8F60BF40);
+ WREG32(index_reg, 0x30201);
+ WREG32(data_reg, 0xBFE88180);
+ WREG32(index_reg, 0x30300);
+ WREG32(data_reg, 0x8EC0BF00);
+ WREG32(index_reg, 0x30301);
+ WREG32(data_reg, 0xBFC88280);
+ WREG32(index_reg, 0x30400);
+ WREG32(data_reg, 0x8DE0BEE0);
+ WREG32(index_reg, 0x30401);
+ WREG32(data_reg, 0xBFA083A0);
+ WREG32(index_reg, 0x30500);
+ WREG32(data_reg, 0x8CE0BED0);
+ WREG32(index_reg, 0x30501);
+ WREG32(data_reg, 0xBF7884E0);
+ WREG32(index_reg, 0x30600);
+ WREG32(data_reg, 0x8BA0BED8);
+ WREG32(index_reg, 0x30601);
+ WREG32(data_reg, 0xBF508640);
+ WREG32(index_reg, 0x30700);
+ WREG32(data_reg, 0x8A60BEE8);
+ WREG32(index_reg, 0x30701);
+ WREG32(data_reg, 0xBF2087A0);
+ WREG32(index_reg, 0x30800);
+ WREG32(data_reg, 0x8900BF00);
+ WREG32(index_reg, 0x30801);
+ WREG32(data_reg, 0xBF008900);
+}
+
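+/* Per-CRTC watermark state; the fixed20_12 members are 20.12 fixed-point
+ * values (see drm_fixed.h). */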
+struct rv515_watermark {
+ u32 lb_request_fifo_depth;
+ fixed20_12 num_line_pair;
+ fixed20_12 estimated_width;
+ fixed20_12 worst_case_latency;
+ fixed20_12 consumption_rate;
+ fixed20_12 active_time;
+ fixed20_12 dbpp;
+ fixed20_12 priority_mark_max;
+ fixed20_12 priority_mark;
+ fixed20_12 sclk;
+};
+
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+ struct radeon_crtc *crtc,
+ struct rv515_watermark *wm)
+{
+ struct drm_display_mode *mode = &crtc->base.mode;
+ fixed20_12 a, b, c;
+ fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+ fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+ if (!crtc->base.enabled) {
+ /* FIXME: wouldn't it be better to set the priority mark to maximum? */
+ wm->lb_request_fifo_depth = 4;
+ return;
+ }
+
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
+ else
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
+ wm->lb_request_fifo_depth = 4;
+ } else {
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+ }
+
+ /* Determine consumption rate
+ * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+ * vtaps = number of vertical taps,
+ * vsc = vertical scaling ratio, defined as source/destination
+ * hsc = horizontal scaling ratio, defined as source/destination
+ */
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
+ if (crtc->rmx_type != RMX_OFF) {
+ b.full = dfixed_const(2);
+ if (crtc->vsc.full > b.full)
+ b.full = crtc->vsc.full;
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
+ } else {
+ consumption_time.full = pclk.full;
+ }
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
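+ /* Example with assumed figures: a 148.5 MHz pixel clock gives
+ * pclk = 1000 / 148.5 ~= 6.73 ns, so without scaling the display
+ * consumes one pixel every 6.73 ns (~0.149 pixels/ns). */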
+
+
+ /* Determine line time
+ * LineTime = total time for one line of display
+ * htotal = total number of horizontal pixels
+ * pclk = pixel clock period(ns)
+ */
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
+
+ /* Determine active time
+ * ActiveTime = time of active region of display within one line,
+ * hactive = total number of horizontal active pixels
+ * htotal = total number of horizontal pixels
+ */
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
+
+ /* Determine chunk time
+ * ChunkTime = the time it takes the DCP to send one chunk of data
+ * to the LB which consists of pipeline delay and inter chunk gap
+ * sclk = system clock (MHz)
+ */
+ a.full = dfixed_const(600 * 1000);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+ read_delay_latency.full = dfixed_const(1000);
+
+ /* Determine the worst case latency
+ * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+ * WorstCaseLatency = worst case time from urgent to when the MC starts
+ * to return data
+ * READ_DELAY_IDLE_MAX = constant of 1us
+ * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+ * which consists of pipeline delay and inter chunk gap
+ */
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+ wm->worst_case_latency.full += read_delay_latency.full;
+ } else {
+ wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
+ }
+
+ /* Determine the tolerable latency
+ * TolerableLatency = Any given request has only 1 line time
+ * for the data to be returned
+ * LBRequestFifoDepth = Number of chunk requests the LB can
+ * put into the request FIFO for a display
+ * LineTime = total time for one line of display
+ * ChunkTime = the time it takes the DCP to send one chunk
+ * of data to the LB which consists of
+ * pipeline delay and inter chunk gap
+ */
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+ tolerable_latency.full = line_time.full;
+ } else {
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = line_time.full - tolerable_latency.full;
+ }
+ /* We assume the worst case of 32 bits (4 bytes) per pixel */
+ wm->dbpp.full = dfixed_const(2 * 16);
+
+ /* Determine the maximum priority mark
+ * width = viewport width in pixels
+ */
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+ /* Determine estimated width */
+ estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = wm->priority_mark_max.full;
+ } else {
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+ wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+ }
+}
+
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ struct rv515_watermark wm0;
+ struct rv515_watermark wm1;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ fixed20_12 priority_mark02, priority_mark12, fill_rate;
+ fixed20_12 a, b;
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+ rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
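+ /* Pack the two request FIFO depths into LB_MAX_REQ_OUTSTANDING:
+ * D1 in the low bits, D2 in bits 16 and up. */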
+ tmp = wm0.lb_request_fifo_depth;
+ tmp |= wm1.lb_request_fifo_depth << 16;
+ WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+
+ if (mode0 && mode1) {
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+ else
+ a.full = wm0.num_line_pair.full;
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+ else
+ b.full = wm1.num_line_pair.full;
+ a.full += b.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ priority_mark02.full = a.full + b.full;
+ } else {
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
+ }
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ priority_mark12.full = a.full + b.full;
+ } else {
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ }
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (dfixed_trunc(priority_mark02) < 0)
+ priority_mark02.full = 0;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (dfixed_trunc(priority_mark12) < 0)
+ priority_mark12.full = 0;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ }
+ } else if (mode0) {
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+ else
+ a.full = wm0.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ priority_mark02.full = a.full + b.full;
+ } else {
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
+ b.full = dfixed_const(16);
+ priority_mark02.full = dfixed_div(a, b);
+ }
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (dfixed_trunc(priority_mark02) < 0)
+ priority_mark02.full = 0;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ } else if (mode1) {
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+ else
+ a.full = wm1.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm1.sclk, a);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ priority_mark12.full = a.full + b.full;
+ } else {
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
+ }
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (dfixed_trunc(priority_mark12) < 0)
+ priority_mark12.full = 0;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ }
+
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
+void rv515_bandwidth_update(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+ /*
+ * Set display0/1 priority up in the memory controller for
+ * modes if the user specifies HIGH for the display priority
+ * option.
+ */
+ if ((rdev->disp_priority == 2) &&
+ (rdev->family == CHIP_RV515)) {
+ tmp = RREG32_MC(MC_MISC_LAT_TIMER);
+ tmp &= ~MC_DISP1R_INIT_LAT_MASK;
+ tmp &= ~MC_DISP0R_INIT_LAT_MASK;
+ if (mode1)
+ tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+ if (mode0)
+ tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
+ WREG32_MC(MC_MISC_LAT_TIMER, tmp);
+ }
+ rv515_bandwidth_avivo_update(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
new file mode 100644
index 0000000..6927a20
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV515D_H__
+#define __RV515D_H__
+
+/*
+ * RV515 registers
+ */
+#define PCIE_INDEX 0x0030
+#define PCIE_DATA 0x0034
+#define MC_IND_INDEX 0x0070
+#define MC_IND_WR_EN (1 << 24)
+#define MC_IND_DATA 0x0074
+#define RBBM_SOFT_RESET 0x00F0
+#define CONFIG_MEMSIZE 0x00F8
+#define HDP_FB_LOCATION 0x0134
+#define CP_CSQ_CNTL 0x0740
+#define CP_CSQ_MODE 0x0744
+#define CP_CSQ_ADDR 0x07F0
+#define CP_CSQ_DATA 0x07F4
+#define CP_CSQ_STAT 0x07F8
+#define CP_CSQ2_STAT 0x07FC
+#define RBBM_STATUS 0x0E40
+#define DST_PIPE_CONFIG 0x170C
+#define WAIT_UNTIL 0x1720
+#define WAIT_2D_IDLE (1 << 14)
+#define WAIT_3D_IDLE (1 << 15)
+#define WAIT_2D_IDLECLEAN (1 << 16)
+#define WAIT_3D_IDLECLEAN (1 << 17)
+#define ISYNC_CNTL 0x1724
+#define ISYNC_ANY2D_IDLE3D (1 << 0)
+#define ISYNC_ANY3D_IDLE2D (1 << 1)
+#define ISYNC_TRIG2D_IDLE3D (1 << 2)
+#define ISYNC_TRIG3D_IDLE2D (1 << 3)
+#define ISYNC_WAIT_IDLEGUI (1 << 4)
+#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
+#define VAP_INDEX_OFFSET 0x208C
+#define VAP_PVS_STATE_FLUSH_REG 0x2284
+#define GB_ENABLE 0x4008
+#define GB_MSPOS0 0x4010
+#define MS_X0_SHIFT 0
+#define MS_Y0_SHIFT 4
+#define MS_X1_SHIFT 8
+#define MS_Y1_SHIFT 12
+#define MS_X2_SHIFT 16
+#define MS_Y2_SHIFT 20
+#define MSBD0_Y_SHIFT 24
+#define MSBD0_X_SHIFT 28
+#define GB_MSPOS1 0x4014
+#define MS_X3_SHIFT 0
+#define MS_Y3_SHIFT 4
+#define MS_X4_SHIFT 8
+#define MS_Y4_SHIFT 12
+#define MS_X5_SHIFT 16
+#define MS_Y5_SHIFT 20
+#define MSBD1_SHIFT 24
+#define GB_TILE_CONFIG 0x4018
+#define ENABLE_TILING (1 << 0)
+#define PIPE_COUNT_MASK 0x0000000E
+#define PIPE_COUNT_SHIFT 1
+#define TILE_SIZE_8 (0 << 4)
+#define TILE_SIZE_16 (1 << 4)
+#define TILE_SIZE_32 (2 << 4)
+#define SUBPIXEL_1_12 (0 << 16)
+#define SUBPIXEL_1_16 (1 << 16)
+#define GB_SELECT 0x401C
+#define GB_AA_CONFIG 0x4020
+#define GB_PIPE_SELECT 0x402C
+#define GA_ENHANCE 0x4274
+#define GA_DEADLOCK_CNTL (1 << 0)
+#define GA_FASTSYNC_CNTL (1 << 1)
+#define GA_POLY_MODE 0x4288
+#define FRONT_PTYPE_POINT (0 << 4)
+#define FRONT_PTYPE_LINE (1 << 4)
+#define FRONT_PTYPE_TRIANGE (2 << 4)
+#define BACK_PTYPE_POINT (0 << 7)
+#define BACK_PTYPE_LINE (1 << 7)
+#define BACK_PTYPE_TRIANGE (2 << 7)
+#define GA_ROUND_MODE 0x428C
+#define GEOMETRY_ROUND_TRUNC (0 << 0)
+#define GEOMETRY_ROUND_NEAREST (1 << 0)
+#define COLOR_ROUND_TRUNC (0 << 2)
+#define COLOR_ROUND_NEAREST (1 << 2)
+#define SU_REG_DEST 0x42C8
+#define RB3D_DSTCACHE_CTLSTAT 0x4E4C
+#define RB3D_DC_FLUSH (2 << 0)
+#define RB3D_DC_FREE (2 << 2)
+#define RB3D_DC_FINISH (1 << 4)
+#define ZB_ZCACHE_CTLSTAT 0x4F18
+#define ZC_FLUSH (1 << 0)
+#define ZC_FREE (1 << 1)
+#define DC_LB_MEMORY_SPLIT 0x6520
+#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
+#define DC_LB_MEMORY_SPLIT_SHIFT 0
+#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
+#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
+#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
+#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
+#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+#define DC_LB_DISP1_END_ADR_SHIFT 4
+#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
+#define D1MODE_PRIORITY_A_CNT 0x6548
+#define MODE_PRIORITY_MARK_MASK 0x00007FFF
+#define MODE_PRIORITY_OFF (1 << 16)
+#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
+#define MODE_PRIORITY_FORCE_MASK (1 << 24)
+#define D1MODE_PRIORITY_B_CNT 0x654C
+#define LB_MAX_REQ_OUTSTANDING 0x6D58
+#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
+#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
+#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
+#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
+#define D2MODE_PRIORITY_A_CNT 0x6D48
+#define D2MODE_PRIORITY_B_CNT 0x6D4C
+
+/* ix[MC] registers */
+#define MC_FB_LOCATION 0x01
+#define MC_FB_START_MASK 0x0000FFFF
+#define MC_FB_START_SHIFT 0
+#define MC_FB_TOP_MASK 0xFFFF0000
+#define MC_FB_TOP_SHIFT 16
+#define MC_AGP_LOCATION 0x02
+#define MC_AGP_START_MASK 0x0000FFFF
+#define MC_AGP_START_SHIFT 0
+#define MC_AGP_TOP_MASK 0xFFFF0000
+#define MC_AGP_TOP_SHIFT 16
+#define MC_AGP_BASE 0x03
+#define MC_AGP_BASE_2 0x04
+#define MC_CNTL 0x5
+#define MEM_NUM_CHANNELS_MASK 0x00000003
+#define MC_STATUS 0x08
+#define MC_STATUS_IDLE (1 << 4)
+#define MC_MISC_LAT_TIMER 0x09
+#define MC_CPR_INIT_LAT_MASK 0x0000000F
+#define MC_VF_INIT_LAT_MASK 0x000000F0
+#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
+#define MC_DISP0R_INIT_LAT_SHIFT 8
+#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
+#define MC_DISP1R_INIT_LAT_SHIFT 12
+#define MC_FIXED_INIT_LAT_MASK 0x000F0000
+#define MC_E2R_INIT_LAT_MASK 0x00F00000
+#define SAME_PAGE_PRIO_MASK 0x0F000000
+#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
+
+
+/*
+ * PM4 packet
+ */
+#define CP_PACKET0 0x00000000
+#define PACKET0_BASE_INDEX_SHIFT 0
+#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
+#define PACKET0_COUNT_SHIFT 16
+#define PACKET0_COUNT_MASK (0x3fff << 16)
+#define CP_PACKET1 0x40000000
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+#define CP_PACKET3 0xC0000000
+#define PACKET3_IT_OPCODE_SHIFT 8
+#define PACKET3_IT_OPCODE_MASK (0xff << 8)
+#define PACKET3_COUNT_SHIFT 16
+#define PACKET3_COUNT_MASK (0x3fff << 16)
+/* PACKET3 op code */
+#define PACKET3_NOP 0x10
+#define PACKET3_3D_DRAW_VBUF 0x28
+#define PACKET3_3D_DRAW_IMMD 0x29
+#define PACKET3_3D_DRAW_INDX 0x2A
+#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_INDX_BUFFER 0x33
+#define PACKET3_3D_DRAW_VBUF_2 0x34
+#define PACKET3_3D_DRAW_IMMD_2 0x35
+#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_BITBLT_MULTI 0x9B
+
+#define PACKET0(reg, n) (CP_PACKET0 | \
+ REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
+ REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n) (CP_PACKET3 | \
+ REG_SET(PACKET3_IT_OPCODE, (op)) | \
+ REG_SET(PACKET3_COUNT, (n)))
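+
+/* e.g. PACKET0(WAIT_UNTIL, 0) builds a type-0 packet header addressing
+ * WAIT_UNTIL with a count of 0 (one dword of payload), as used by
+ * rv515_ring_start() above. */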
+
+/* Registers */
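+/* For each register R_xxxxxx_NAME below, S_xxxxxx_FIELD(x) shifts a value
+ * into the field, G_xxxxxx_FIELD(x) extracts it, and C_xxxxxx_FIELD is the
+ * mask that clears the field. */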
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
+#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
+#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0000F8_CONFIG_MEMSIZE 0x00000000
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_000300_VGA_RENDER_CONTROL 0x000300
+#define S_000300_VGA_BLINK_RATE(x) (((x) & 0x1F) << 0)
+#define G_000300_VGA_BLINK_RATE(x) (((x) >> 0) & 0x1F)
+#define C_000300_VGA_BLINK_RATE 0xFFFFFFE0
+#define S_000300_VGA_BLINK_MODE(x) (((x) & 0x3) << 5)
+#define G_000300_VGA_BLINK_MODE(x) (((x) >> 5) & 0x3)
+#define C_000300_VGA_BLINK_MODE 0xFFFFFF9F
+#define S_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) & 0x1) << 7)
+#define G_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) >> 7) & 0x1)
+#define C_000300_VGA_CURSOR_BLINK_INVERT 0xFFFFFF7F
+#define S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) & 0x1) << 8)
+#define G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) >> 8) & 0x1)
+#define C_000300_VGA_EXTD_ADDR_COUNT_ENABLE 0xFFFFFEFF
+#define S_000300_VGA_VSTATUS_CNTL(x) (((x) & 0x3) << 16)
+#define G_000300_VGA_VSTATUS_CNTL(x) (((x) >> 16) & 0x3)
+#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
+#define S_000300_VGA_LOCK_8DOT(x) (((x) & 0x1) << 24)
+#define G_000300_VGA_LOCK_8DOT(x) (((x) >> 24) & 0x1)
+#define C_000300_VGA_LOCK_8DOT 0xFEFFFFFF
+#define S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
+#define G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
+#define C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL 0xFDFFFFFF
+#define R_000310_VGA_MEMORY_BASE_ADDRESS 0x000310
+#define S_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000310_VGA_MEMORY_BASE_ADDRESS 0x00000000
+#define R_000328_VGA_HDP_CONTROL 0x000328
+#define S_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) & 0x1) << 0)
+#define G_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) >> 0) & 0x1)
+#define C_000328_VGA_MEM_PAGE_SELECT_EN 0xFFFFFFFE
+#define S_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) & 0x1) << 8)
+#define G_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) >> 8) & 0x1)
+#define C_000328_VGA_RBBM_LOCK_DISABLE 0xFFFFFEFF
+#define S_000328_VGA_SOFT_RESET(x) (((x) & 0x1) << 16)
+#define G_000328_VGA_SOFT_RESET(x) (((x) >> 16) & 0x1)
+#define C_000328_VGA_SOFT_RESET 0xFFFEFFFF
+#define S_000328_VGA_TEST_RESET_CONTROL(x) (((x) & 0x1) << 24)
+#define G_000328_VGA_TEST_RESET_CONTROL(x) (((x) >> 24) & 0x1)
+#define C_000328_VGA_TEST_RESET_CONTROL 0xFEFFFFFF
+#define R_000330_D1VGA_CONTROL 0x000330
+#define S_000330_D1VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
+#define G_000330_D1VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_000330_D1VGA_MODE_ENABLE 0xFFFFFFFE
+#define S_000330_D1VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
+#define G_000330_D1VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
+#define C_000330_D1VGA_TIMING_SELECT 0xFFFFFEFF
+#define S_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
+#define G_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
+#define C_000330_D1VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
+#define S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
+#define G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
+#define C_000330_D1VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
+#define S_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
+#define G_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
+#define C_000330_D1VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
+#define S_000330_D1VGA_ROTATE(x) (((x) & 0x3) << 24)
+#define G_000330_D1VGA_ROTATE(x) (((x) >> 24) & 0x3)
+#define C_000330_D1VGA_ROTATE 0xFCFFFFFF
+#define R_000338_D2VGA_CONTROL 0x000338
+#define S_000338_D2VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
+#define G_000338_D2VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_000338_D2VGA_MODE_ENABLE 0xFFFFFFFE
+#define S_000338_D2VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
+#define G_000338_D2VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
+#define C_000338_D2VGA_TIMING_SELECT 0xFFFFFEFF
+#define S_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
+#define G_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
+#define C_000338_D2VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
+#define S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
+#define G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
+#define C_000338_D2VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
+#define S_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
+#define G_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
+#define C_000338_D2VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
+#define S_000338_D2VGA_ROTATE(x) (((x) & 0x3) << 24)
+#define G_000338_D2VGA_ROTATE(x) (((x) >> 24) & 0x3)
+#define C_000338_D2VGA_ROTATE 0xFCFFFFFF
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
+#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
+#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
+#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
+#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
+#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
+#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
+#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
+#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_006080_D1CRTC_CONTROL 0x006080
+#define S_006080_D1CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
+#define G_006080_D1CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
+#define C_006080_D1CRTC_MASTER_EN 0xFFFFFFFE
+#define S_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
+#define G_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
+#define C_006080_D1CRTC_SYNC_RESET_SEL 0xFFFFFFEF
+#define S_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
+#define G_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
+#define C_006080_D1CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
+#define S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
+#define G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
+#define C_006080_D1CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
+#define S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
+#define R_0060E8_D1CRTC_UPDATE_LOCK 0x0060E8
+#define S_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
+#define G_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
+#define C_0060E8_D1CRTC_UPDATE_LOCK 0xFFFFFFFE
+#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x006110
+#define S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
+#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x006118
+#define S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
+#define R_006880_D2CRTC_CONTROL 0x006880
+#define S_006880_D2CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
+#define G_006880_D2CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
+#define C_006880_D2CRTC_MASTER_EN 0xFFFFFFFE
+#define S_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
+#define G_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
+#define C_006880_D2CRTC_SYNC_RESET_SEL 0xFFFFFFEF
+#define S_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
+#define G_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
+#define C_006880_D2CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
+#define S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
+#define G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
+#define C_006880_D2CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
+#define S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
+#define R_0068E8_D2CRTC_UPDATE_LOCK 0x0068E8
+#define S_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
+#define G_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
+#define C_0068E8_D2CRTC_UPDATE_LOCK 0xFFFFFFFE
+#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x006910
+#define S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
+#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x006918
+#define S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
+
+
+#define R_000001_MC_FB_LOCATION 0x000001
+#define S_000001_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000001_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000001_MC_FB_START 0xFFFF0000
+#define S_000001_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000001_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000001_MC_FB_TOP 0x0000FFFF
+#define R_000002_MC_AGP_LOCATION 0x000002
+#define S_000002_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_000002_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000002_MC_AGP_START 0xFFFF0000
+#define S_000002_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000002_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000002_MC_AGP_TOP 0x0000FFFF
+#define R_000003_MC_AGP_BASE 0x000003
+#define S_000003_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000003_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000003_AGP_BASE_ADDR 0x00000000
+#define R_000004_MC_AGP_BASE_2 0x000004
+#define S_000004_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_000004_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_000004_AGP_BASE_ADDR_2 0xFFFFFFF0
+
+
+#define R_00000F_CP_DYN_CNTL 0x00000F
+#define S_00000F_CP_FORCEON(x) (((x) & 0x1) << 0)
+#define G_00000F_CP_FORCEON(x) (((x) >> 0) & 0x1)
+#define C_00000F_CP_FORCEON 0xFFFFFFFE
+#define S_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
+#define G_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
+#define C_00000F_CP_MAX_DYN_STOP_LAT 0xFFFFFFFD
+#define S_00000F_CP_CLOCK_STATUS(x) (((x) & 0x1) << 2)
+#define G_00000F_CP_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
+#define C_00000F_CP_CLOCK_STATUS 0xFFFFFFFB
+#define S_00000F_CP_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
+#define G_00000F_CP_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
+#define C_00000F_CP_PROG_SHUTOFF 0xFFFFFFF7
+#define S_00000F_CP_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
+#define G_00000F_CP_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
+#define C_00000F_CP_PROG_DELAY_VALUE 0xFFFFF00F
+#define S_00000F_CP_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
+#define G_00000F_CP_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
+#define C_00000F_CP_LOWER_POWER_IDLE 0xFFF00FFF
+#define S_00000F_CP_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
+#define G_00000F_CP_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
+#define C_00000F_CP_LOWER_POWER_IGNORE 0xFFEFFFFF
+#define S_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
+#define G_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
+#define C_00000F_CP_NORMAL_POWER_IGNORE 0xFFDFFFFF
+#define S_00000F_SPARE(x) (((x) & 0x3) << 22)
+#define G_00000F_SPARE(x) (((x) >> 22) & 0x3)
+#define C_00000F_SPARE 0xFF3FFFFF
+#define S_00000F_CP_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
+#define G_00000F_CP_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
+#define C_00000F_CP_NORMAL_POWER_BUSY 0x00FFFFFF
+#define R_000011_E2_DYN_CNTL 0x000011
+#define S_000011_E2_FORCEON(x) (((x) & 0x1) << 0)
+#define G_000011_E2_FORCEON(x) (((x) >> 0) & 0x1)
+#define C_000011_E2_FORCEON 0xFFFFFFFE
+#define S_000011_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
+#define G_000011_E2_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
+#define C_000011_E2_MAX_DYN_STOP_LAT 0xFFFFFFFD
+#define S_000011_E2_CLOCK_STATUS(x) (((x) & 0x1) << 2)
+#define G_000011_E2_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
+#define C_000011_E2_CLOCK_STATUS 0xFFFFFFFB
+#define S_000011_E2_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
+#define G_000011_E2_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
+#define C_000011_E2_PROG_SHUTOFF 0xFFFFFFF7
+#define S_000011_E2_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
+#define G_000011_E2_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
+#define C_000011_E2_PROG_DELAY_VALUE 0xFFFFF00F
+#define S_000011_E2_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
+#define G_000011_E2_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
+#define C_000011_E2_LOWER_POWER_IDLE 0xFFF00FFF
+#define S_000011_E2_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
+#define G_000011_E2_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
+#define C_000011_E2_LOWER_POWER_IGNORE 0xFFEFFFFF
+#define S_000011_E2_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
+#define G_000011_E2_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
+#define C_000011_E2_NORMAL_POWER_IGNORE 0xFFDFFFFF
+#define S_000011_SPARE(x) (((x) & 0x3) << 22)
+#define G_000011_SPARE(x) (((x) >> 22) & 0x3)
+#define C_000011_SPARE 0xFF3FFFFF
+#define S_000011_E2_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
+#define G_000011_E2_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
+#define C_000011_E2_NORMAL_POWER_BUSY 0x00FFFFFF
+#define R_000013_IDCT_DYN_CNTL 0x000013
+#define S_000013_IDCT_FORCEON(x) (((x) & 0x1) << 0)
+#define G_000013_IDCT_FORCEON(x) (((x) >> 0) & 0x1)
+#define C_000013_IDCT_FORCEON 0xFFFFFFFE
+#define S_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
+#define G_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
+#define C_000013_IDCT_MAX_DYN_STOP_LAT 0xFFFFFFFD
+#define S_000013_IDCT_CLOCK_STATUS(x) (((x) & 0x1) << 2)
+#define G_000013_IDCT_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
+#define C_000013_IDCT_CLOCK_STATUS 0xFFFFFFFB
+#define S_000013_IDCT_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
+#define G_000013_IDCT_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
+#define C_000013_IDCT_PROG_SHUTOFF 0xFFFFFFF7
+#define S_000013_IDCT_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
+#define G_000013_IDCT_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
+#define C_000013_IDCT_PROG_DELAY_VALUE 0xFFFFF00F
+#define S_000013_IDCT_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
+#define G_000013_IDCT_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
+#define C_000013_IDCT_LOWER_POWER_IDLE 0xFFF00FFF
+#define S_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
+#define G_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
+#define C_000013_IDCT_LOWER_POWER_IGNORE 0xFFEFFFFF
+#define S_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
+#define G_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
+#define C_000013_IDCT_NORMAL_POWER_IGNORE 0xFFDFFFFF
+#define S_000013_SPARE(x) (((x) & 0x3) << 22)
+#define G_000013_SPARE(x) (((x) >> 22) & 0x3)
+#define C_000013_SPARE 0xFF3FFFFF
+#define S_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
+#define G_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
+#define C_000013_IDCT_NORMAL_POWER_BUSY 0x00FFFFFF
+
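+/*
+ * Illustrative usage sketch (not part of the original patch): every
+ * register here gets an R_ offset plus per-field S_ (shift a value into
+ * place), G_ (extract a field) and C_ (clear-field mask) helpers.  A
+ * read-modify-write of a single field, assuming the indirect MC
+ * accessors RREG32_MC()/WREG32_MC(), would look like:
+ *
+ *	u32 tmp = RREG32_MC(R_000001_MC_FB_LOCATION);
+ *	tmp &= C_000001_MC_FB_START;
+ *	tmp |= S_000001_MC_FB_START(fb_start >> 16);
+ *	WREG32_MC(R_000001_MC_FB_LOCATION, tmp);
+ */
+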
+#endif
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 0000000..f5e92cf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,2194 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+static void rv770_gpu_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* RV740 uses evergreen uvd clk programming */
+ if (rdev->family == CHIP_RV740)
+ return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ if (!vclk || !dclk) {
+ /* keep it in bypass mode and put the PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+ 43663, 0x03FFFFFE, 1, 30, ~0,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ fb_div |= 1;
+ vclk_div -= 1;
+ dclk_div -= 1;
+
+ /* set UPLL_FB_DIV to 0x50000 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+ /* deassert UPLL_RESET and UPLL_SLEEP */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+ /* assert BYPASS_EN and FB_DIV[0] (reason unknown) */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* set the required FB_DIV, REF_DIV and post-divider values */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_SW_HILEN(vclk_div >> 1) |
+ UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+ UPLL_SW_HILEN2(dclk_div >> 1) |
+ UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+ ~UPLL_SW_MASK);
+
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+ ~UPLL_FB_DIV_MASK);
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* deassert BYPASS_EN and FB_DIV[0] (reason unknown) */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
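+
+/*
+ * Note (illustrative, not part of the original patch): the SW_HILEN/
+ * SW_LOLEN fields programmed above split each (already decremented)
+ * divider value d into high- and low-phase lengths so the output stays
+ * near a 50% duty cycle:
+ *
+ *	hilen = d >> 1;
+ *	lolen = hilen + (d & 1);	so hilen + lolen == d
+ *
+ * e.g. d = 5 gives hilen = 2 and lolen = 3.
+ */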
+
+static const u32 r7xx_golden_registers[] =
+{
+ 0x8d00, 0xffffffff, 0x0e0e0074,
+ 0x8d04, 0xffffffff, 0x013a2b34,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x8b20, 0xffffffff, 0,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x28350, 0xffffffff, 0,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x7300, 0xffffffff, 0x001000f0
+};
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0,
+ 0x9148, 0xffffffff, 0,
+ 0x3f94, 0xffffffff, 0,
+ 0x914c, 0xffffffff, 0,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0x00cc0000,
+ 0x9148, 0xffffffff, 0x00cc0000,
+ 0x3f94, 0xffffffff, 0x00cc0000,
+ 0x914c, 0xffffffff, 0x00cc0000,
+ 0x9b7c, 0xffffffff, 0x00fa0000,
+ 0x3f8c, 0xffffffff, 0x00fa0000,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x9,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x9,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x8,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x9,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x925c, 0xffffffff, 0x00020001,
+ 0x9260, 0xffffffff, 0x00040003,
+ 0x9264, 0xffffffff, 0x00060005,
+ 0x9270, 0xffffffff, 0x00080007,
+ 0x9274, 0xffffffff, 0x000a0009,
+ 0x9278, 0xffffffff, 0x000c000b,
+ 0x927c, 0xffffffff, 0x000e000d,
+ 0x9280, 0xffffffff, 0x0010000f,
+ 0x928c, 0xffffffff, 0x00120011,
+ 0x9290, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00fc0000,
+ 0x9148, 0x00ff0000, 0x00fc0000,
+ 0x3f94, 0x00ff0000, 0x00fc0000,
+ 0x914c, 0x00ff0000, 0x00fc0000,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030040,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9174, 0xffffffff, 0x00000003,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00f00000,
+ 0x9148, 0x00ff0000, 0x00f00000,
+ 0x3f94, 0x00ff0000, 0x00f00000,
+ 0x914c, 0x00ff0000, 0x00f00000,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x916c, 0xffffffff, 0x00040003,
+ 0x9170, 0xffffffff, 0x00000005,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91b0, 0xffffffff, 0x00050001,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91c4, 0xffffffff, 0x00000004,
+ 0x91c8, 0xffffffff, 0x00070006,
+ 0x91cc, 0xffffffff, 0x00050001,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91e0, 0xffffffff, 0x00000004,
+ 0x91e4, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x91ec, 0xffffffff, 0x00050001,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x9200, 0xffffffff, 0x00000004,
+ 0x9204, 0xffffffff, 0x00070006,
+ 0x9208, 0xffffffff, 0x00050001,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x921c, 0xffffffff, 0x00000004,
+ 0x9220, 0xffffffff, 0x00070006,
+ 0x9224, 0xffffffff, 0x00050001,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x9238, 0xffffffff, 0x00000004,
+ 0x923c, 0xffffffff, 0x00070006,
+ 0x9240, 0xffffffff, 0x00050001,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9254, 0xffffffff, 0x00000004,
+ 0x9258, 0xffffffff, 0x00070006,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+ 0x88c4, 0xffffffff, 0x00000082,
+ 0x28a50, 0xfffffffc, 0x00000004,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x7300, 0xffffffff, 0x001000f0,
+ 0x3f90, 0x00ff0000, 0,
+ 0x9148, 0x00ff0000, 0,
+ 0x3f94, 0x00ff0000, 0,
+ 0x914c, 0x00ff0000, 0,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8a14, 0x00000007, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0xffffffff, 0x00004000,
+ 0xa180, 0xffffffff, 0x00003f3f,
+ 0x8d00, 0xffffffff, 0x0e0e003a,
+ 0x8d04, 0xffffffff, 0x013a0e2a,
+ 0x8c00, 0xffffffff, 0xe400000f,
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0x28350, 0xffffffff, 0,
+ 0x8cf0, 0x1fffffff, 0x08e00420,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030100,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x100,
+ 0x8b28, 0xffffffff, 0x100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_RV770:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ if (rdev->pdev->device == 0x994e)
+ radeon_program_register_sequence(rdev,
+ rv770ce_golden_registers,
+ (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+ else
+ radeon_program_register_sequence(rdev,
+ rv770_golden_registers,
+ (const u32)ARRAY_SIZE(rv770_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv770_mgcg_init,
+ (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ break;
+ case CHIP_RV730:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_golden_registers,
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_mgcg_init,
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
+ break;
+ case CHIP_RV710:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_golden_registers,
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_mgcg_init,
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
+ break;
+ case CHIP_RV740:
+ radeon_program_register_sequence(rdev,
+ rv740_golden_registers,
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv740_mgcg_init,
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
+ break;
+ default:
+ break;
+ }
+}
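+
+/*
+ * Illustrative sketch (an assumption, not part of this patch): the golden
+ * register tables above are flat (offset, and_mask, or_value) triples,
+ * and radeon_program_register_sequence() is expected to apply them with a
+ * read-modify-write, roughly:
+ *
+ *	for (i = 0; i < array_size; i += 3) {
+ *		reg = regs[i]; and_mask = regs[i + 1]; or_val = regs[i + 2];
+ *		if (and_mask == 0xffffffff)
+ *			tmp = or_val;
+ *		else
+ *			tmp = (RREG32(reg) & ~and_mask) | or_val;
+ *		WREG32(reg, tmp);
+ *	}
+ */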
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
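+
+/*
+ * Worked example (illustrative): radeon keeps clocks in 10 kHz units, so
+ * PCIE_BUS_CLK = 10000 is 100 MHz and TCLK = 1000 is 10 MHz.  With a
+ * typical 27 MHz reference crystal (reference_freq = 2700), the
+ * XTALIN_DIVIDE case would return 2700 / 4 = 675, i.e. 6.75 MHz.
+ */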
+
+int rv770_uvd_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
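+
+/*
+ * Layout note (illustrative): the OFFSET/SIZE pairs above are in 8-byte
+ * units (hence the >> 3) and carve the UVD buffer into three consecutive
+ * VCPU regions:
+ *
+ *	region 0: firmware image, at gpu_addr
+ *	region 1: stack, RADEON_UVD_STACK_SIZE, immediately after
+ *	region 2: heap, RADEON_UVD_HEAP_SIZE, immediately after
+ */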
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ if (radeon_crtc->crtc_id) {
+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ } else {
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ }
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
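+
+/*
+ * Usage note (illustrative): the new scanout address is written while
+ * D1GRPH_UPDATE_LOCK is held so the surface-address registers update
+ * atomically; the hardware latches them on the next vblank.  A non-zero
+ * return (SURFACE_UPDATE_PENDING still set) tells the caller the flip
+ * has not yet completed.
+ */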
+
+/* get temperature in millidegrees */
+int rv770_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ int actual_temp;
+
+ if (temp & 0x400)
+ actual_temp = -256;
+ else if (temp & 0x200)
+ actual_temp = 255;
+ else if (temp & 0x100) {
+ actual_temp = temp & 0x1ff;
+ actual_temp |= ~0x1ff;
+ } else
+ actual_temp = temp & 0xff;
+
+ return (actual_temp * 1000) / 2;
+}
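+
+/*
+ * Worked example (illustrative): the raw field is a signed temperature
+ * in 0.5 degree steps.  A raw reading of 0x50 (80) falls in the plain
+ * positive range, so the function returns 80 * 1000 / 2 = 40000
+ * millidegrees (40 C).  A raw 0x1f0 has bit 8 set and is sign-extended
+ * to -16, giving -8000 millidegrees.
+ */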
+
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+ int req_ps_idx = rdev->pm.requested_power_state_index;
+ int req_cm_idx = rdev->pm.requested_clock_mode_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+ struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+ if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+ /* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
+ return;
+ if (voltage->voltage != rdev->pm.current_vddc) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+ rdev->pm.current_vddc = voltage->voltage;
+ DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ }
+ }
+}
+
+/*
+ * GART
+ */
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if (rdev->family == CHIP_RV740)
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ for (i = 1; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ r600_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
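+
+/*
+ * Note (illustrative): the page table START/END/BASE addresses above are
+ * programmed in 4 KiB page units (hence the >> 12); a 512 MiB GTT placed
+ * at offset 0, for example, covers page indices 0x00000 through 0x1ffff.
+ */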
+
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Disable all tables */
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
+{
+ radeon_gart_fini(rdev);
+ rv770_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+}
+
+
+static void rv770_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ /* r7xx hw bug: flush the HDP cache by reading from HDP_DEBUG1 rather
+ * than by writing to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ tmp = RREG32(HDP_DEBUG1);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lock out access through the VGA aperture */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ rv515_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it from overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
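+
+/*
+ * Worked example (illustrative): MC_VM_FB_LOCATION packs the framebuffer
+ * top and base in 16 MiB units (>> 24), with the top in the upper
+ * half-word.  For 512 MiB of VRAM at offset 0 (vram_start = 0,
+ * vram_end = 0x1fffffff):
+ *
+ *	tmp  = ((0x1fffffff >> 24) & 0xffff) << 16;	yields 0x001f0000
+ *	tmp |= (0x0 >> 24) & 0xffff;			still 0x001f0000
+ */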
+
+
+/*
+ * CP.
+ */
+void r700_cp_stop(struct radeon_device *rdev)
+{
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+static int rv770_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+ BUF_SWAP_32BIT |
+#endif
+ RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+ /* Reset cp */
+ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
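+
+/*
+ * Note (illustrative): the PFP/ME microcode images are stored as
+ * big-endian 32-bit words, so be32_to_cpup() swaps them on little-endian
+ * hosts before they are streamed into the ucode RAMs; the writes to the
+ * ADDR registers reset the auto-incrementing write pointers.
+ */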
+
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * Core functions
+ */
+static void rv770_gpu_init(struct radeon_device *rdev)
+{
+ int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 db_debug3;
+ u32 num_gs_verts_per_thread;
+ u32 vgt_gs_per_es;
+ u32 gs_prim_buffer_depth = 0;
+ u32 sq_ms_fifo_sizes;
+ u32 sq_config;
+ u32 sq_thread_resource_mgmt;
+ u32 hdp_host_path_cntl;
+ u32 sq_dyn_gpr_size_simd_ab_0;
+ u32 gb_tiling_config = 0;
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config = 0;
+ u32 mc_arb_ramcfg;
+ u32 db_debug4, tmp;
+ u32 inactive_pipes, shader_pipe_config;
+ u32 disabled_rb_mask;
+ unsigned active_number;
+
+ /* setup chip specs */
+ rdev->config.rv770.tiling_group_size = 256;
+ switch (rdev->family) {
+ case CHIP_RV770:
+ rdev->config.rv770.max_pipes = 4;
+ rdev->config.rv770.max_tile_pipes = 8;
+ rdev->config.rv770.max_simds = 10;
+ rdev->config.rv770.max_backends = 4;
+ rdev->config.rv770.max_gprs = 256;
+ rdev->config.rv770.max_threads = 248;
+ rdev->config.rv770.max_stack_entries = 512;
+ rdev->config.rv770.max_hw_contexts = 8;
+ rdev->config.rv770.max_gs_threads = 16 * 2;
+ rdev->config.rv770.sx_max_export_size = 128;
+ rdev->config.rv770.sx_max_export_pos_size = 16;
+ rdev->config.rv770.sx_max_export_smx_size = 112;
+ rdev->config.rv770.sq_num_cf_insts = 2;
+
+ rdev->config.rv770.sx_num_of_sets = 7;
+ rdev->config.rv770.sc_prim_fifo_size = 0xF9;
+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+ break;
+ case CHIP_RV730:
+ rdev->config.rv770.max_pipes = 2;
+ rdev->config.rv770.max_tile_pipes = 4;
+ rdev->config.rv770.max_simds = 8;
+ rdev->config.rv770.max_backends = 2;
+ rdev->config.rv770.max_gprs = 128;
+ rdev->config.rv770.max_threads = 248;
+ rdev->config.rv770.max_stack_entries = 256;
+ rdev->config.rv770.max_hw_contexts = 8;
+ rdev->config.rv770.max_gs_threads = 16 * 2;
+ rdev->config.rv770.sx_max_export_size = 256;
+ rdev->config.rv770.sx_max_export_pos_size = 32;
+ rdev->config.rv770.sx_max_export_smx_size = 224;
+ rdev->config.rv770.sq_num_cf_insts = 2;
+
+ rdev->config.rv770.sx_num_of_sets = 7;
+ rdev->config.rv770.sc_prim_fifo_size = 0xf9;
+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+ if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+ rdev->config.rv770.sx_max_export_pos_size -= 16;
+ rdev->config.rv770.sx_max_export_smx_size += 16;
+ }
+ break;
+ case CHIP_RV710:
+ rdev->config.rv770.max_pipes = 2;
+ rdev->config.rv770.max_tile_pipes = 2;
+ rdev->config.rv770.max_simds = 2;
+ rdev->config.rv770.max_backends = 1;
+ rdev->config.rv770.max_gprs = 256;
+ rdev->config.rv770.max_threads = 192;
+ rdev->config.rv770.max_stack_entries = 256;
+ rdev->config.rv770.max_hw_contexts = 4;
+ rdev->config.rv770.max_gs_threads = 8 * 2;
+ rdev->config.rv770.sx_max_export_size = 128;
+ rdev->config.rv770.sx_max_export_pos_size = 16;
+ rdev->config.rv770.sx_max_export_smx_size = 112;
+ rdev->config.rv770.sq_num_cf_insts = 1;
+
+ rdev->config.rv770.sx_num_of_sets = 7;
+ rdev->config.rv770.sc_prim_fifo_size = 0x40;
+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+ break;
+ case CHIP_RV740:
+ rdev->config.rv770.max_pipes = 4;
+ rdev->config.rv770.max_tile_pipes = 4;
+ rdev->config.rv770.max_simds = 8;
+ rdev->config.rv770.max_backends = 4;
+ rdev->config.rv770.max_gprs = 256;
+ rdev->config.rv770.max_threads = 248;
+ rdev->config.rv770.max_stack_entries = 512;
+ rdev->config.rv770.max_hw_contexts = 8;
+ rdev->config.rv770.max_gs_threads = 16 * 2;
+ rdev->config.rv770.sx_max_export_size = 256;
+ rdev->config.rv770.sx_max_export_pos_size = 32;
+ rdev->config.rv770.sx_max_export_smx_size = 224;
+ rdev->config.rv770.sq_num_cf_insts = 2;
+
+ rdev->config.rv770.sx_num_of_sets = 7;
+ rdev->config.rv770.sc_prim_fifo_size = 0x100;
+ rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+
+ if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+ rdev->config.rv770.sx_max_export_pos_size -= 16;
+ rdev->config.rv770.sx_max_export_smx_size += 16;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Initialize HDP */
+ j = 0;
+ for (i = 0; i < 32; i++) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ j += 0x18;
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ /* setup tiling, simd, pipe config */
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+ if (!(inactive_pipes & tmp)) {
+ active_number++;
+ }
+ tmp <<= 1;
+ }
+ if (active_number == 1) {
+ WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+ } else {
+ WREG32(SPI_CONFIG_CNTL, 0);
+ }
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+ if (tmp < rdev->config.rv770.max_backends) {
+ rdev->config.rv770.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.rv770.max_pipes) {
+ rdev->config.rv770.max_pipes = tmp;
+ }
+ tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.rv770.max_simds) {
+ rdev->config.rv770.max_simds = tmp;
+ }
+
+ switch (rdev->config.rv770.max_tile_pipes) {
+ case 1:
+ default:
+ gb_tiling_config = PIPE_TILING(0);
+ break;
+ case 2:
+ gb_tiling_config = PIPE_TILING(1);
+ break;
+ case 4:
+ gb_tiling_config = PIPE_TILING(2);
+ break;
+ case 8:
+ gb_tiling_config = PIPE_TILING(3);
+ break;
+ }
+ rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+ tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+ R7XX_MAX_BACKENDS, disabled_rb_mask);
+ gb_tiling_config |= tmp << 16;
+ rdev->config.rv770.backend_map = tmp;
+
+ if (rdev->family == CHIP_RV770)
+ gb_tiling_config |= BANK_TILING(1);
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ gb_tiling_config |= BANK_TILING(1);
+ else
+ gb_tiling_config |= BANK_TILING(0);
+ }
+ rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
+ gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
+ gb_tiling_config |= ROW_TILING(3);
+ gb_tiling_config |= SAMPLE_SPLIT(3);
+ } else {
+ gb_tiling_config |=
+ ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+ gb_tiling_config |=
+ SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+ }
+
+ gb_tiling_config |= BANK_SWAPS(1);
+ rdev->config.rv770.tile_config = gb_tiling_config;
+
+ WREG32(GB_TILING_CONFIG, gb_tiling_config);
+ WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+ if (rdev->family == CHIP_RV730) {
+ WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ }
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+
+ num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+ WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
+ WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ ta_aux_cntl = RREG32(TA_CNTL_AUX);
+ WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
+ smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ if (rdev->family != CHIP_RV740)
+ WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
+
+ if (rdev->family != CHIP_RV770)
+ WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
+ db_debug3 = RREG32(DB_DEBUG3);
+ db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ WREG32(DB_DEBUG3, db_debug3);
+
+ if (rdev->family != CHIP_RV770) {
+ db_debug4 = RREG32(DB_DEBUG4);
+ db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
+ WREG32(DB_DEBUG4, db_debug4);
+ }
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+
+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8));
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+ break;
+ case CHIP_RV740:
+ default:
+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
+ break;
+ }
+ WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+ /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+ * should be adjusted as needed by the 2D/3D drivers. This just sets default values
+ */
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (DX9_CONSTS |
+ VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+ if (rdev->family == CHIP_RV710)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ WREG32(SQ_CONFIG, sq_config);
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+ NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+ NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
+ NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
+
+ sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
+ NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
+ NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
+ if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
+ sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
+ else
+ sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+ NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+ NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+ sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+ SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
+ SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+ SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
+
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+ WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_RV710)
+ WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+ else
+ WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV740:
+ gs_prim_buffer_depth = 384;
+ break;
+ case CHIP_RV710:
+ gs_prim_buffer_depth = 128;
+ break;
+ default:
+ break;
+ }
+
+ num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
+ vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+ /* Max value for this is 256 */
+ if (vgt_gs_per_es > 256)
+ vgt_gs_per_es = 256;
+
+ WREG32(VGT_ES_PER_GS, 128);
+ WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
+ WREG32(VGT_GS_PER_VS, 2);
+
+ /* more default values. 2D/3D driver should adjust as needed */
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+ WREG32(VGT_STRMOUT_EN, 0);
+ WREG32(SX_MISC, 0);
+ WREG32(PA_SC_MODE_CNTL, 0);
+ WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
+ WREG32(PA_SC_AA_CONFIG, 0);
+ WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
+ WREG32(PA_SC_LINE_STIPPLE, 0);
+ WREG32(SPI_INPUT_Z, 0);
+ WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+ WREG32(CB_COLOR7_FRAG, 0);
+
+ /* clear render buffer base addresses */
+ WREG32(CB_COLOR0_BASE, 0);
+ WREG32(CB_COLOR1_BASE, 0);
+ WREG32(CB_COLOR2_BASE, 0);
+ WREG32(CB_COLOR3_BASE, 0);
+ WREG32(CB_COLOR4_BASE, 0);
+ WREG32(CB_COLOR5_BASE, 0);
+ WREG32(CB_COLOR6_BASE, 0);
+ WREG32(CB_COLOR7_BASE, 0);
+
+ WREG32(TCP_CNTL, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+ NUM_CLIP_SEQ(3)));
+ WREG32(VC_ENHANCE, 0);
+}
+
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
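+		/* place VRAM in whichever hole around the GTT aperture is
+		 * larger: size_bf is the space before the aperture, size_af
+		 * the space after it.
+		 */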
+ size_bf = mc->gtt_start;
+ size_af = mc->mc_mask - mc->gtt_end;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end + 1;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
+static int rv770_mc_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+	/* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
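+	/* e.g. 4 channels of 32 bits give a 128-bit memory interface
+	 * (illustrative combination).
+	 */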
+	/* Could the aperture size report 0? */
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ /* Setup GPU memory space */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ r700_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+
+ return 0;
+}
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
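+	/* each COPY packet moves at most 0xFFFF dwords (~256 KiB), so the
+	 * transfer is split into num_loops chunks of 5 ring dwords each,
+	 * with 8 more reserved for the optional semaphore sync and fence.
+	 */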
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
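+		/* COPY packet layout: header with the dword count, then the
+		 * low 32 bits and high 8 bits of the destination and source
+		 * addresses.
+		 */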
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+static int rv770_startup(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
+ rv770_mc_program(rdev);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ rv770_agp_enable(rdev);
+ } else {
+ r = rv770_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+
+ rv770_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ if (r)
+ return r;
+
+ r = rv770_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = r600_cp_resume(rdev);
+ if (r)
+ return r;
+
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+int rv770_resume(struct radeon_device *rdev)
+{
+ int r;
+
+	/* Do not reset the GPU before posting; on rv770 hardware, unlike
+	 * r500 hardware, posting performs the tasks needed to bring the
+	 * GPU back into good shape.
+	 */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
+
+ rdev->accel_working = true;
+ r = rv770_startup(rdev);
+ if (r) {
+		DRM_ERROR("rv770 startup failed on resume\n");
+ rdev->accel_working = false;
+ return r;
+ }
+
+	return r;
+}
+
+int rv770_suspend(struct radeon_device *rdev)
+{
+ r600_audio_fini(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_suspend(rdev);
+ r700_cp_stop(rdev);
+ r600_dma_stop(rdev);
+ r600_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ rv770_pcie_gart_disable(rdev);
+
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call the asic-specific functions. This should also allow
+ * us to remove a number of callback functions, like vram_info.
+ */
+int rv770_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for RV770 GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ r = rv770_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = rv770_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+
+ return 0;
+}
+
+void rv770_fini(struct radeon_device *rdev)
+{
+ r600_blit_fini(rdev);
+ r700_cp_fini(rdev);
+ r600_dma_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
+ r600_uvd_stop(rdev);
+ radeon_uvd_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, tmp;
+ u16 link_cntl2;
+
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+ (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW |
+ LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
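+		/* 0x4088 appears to be the config-space LINK_CNTL2 register
+		 * (0x88) seen through the mm config window; a target link
+		 * speed of 0x2 selects 5.0 GT/s.
+		 */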
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
new file mode 100644
index 0000000..85b1626
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef RV770_H
+#define RV770_H
+
+#define R7XX_MAX_SH_GPRS 256
+#define R7XX_MAX_TEMP_GPRS 16
+#define R7XX_MAX_SH_THREADS 256
+#define R7XX_MAX_SH_STACK_ENTRIES 4096
+#define R7XX_MAX_BACKENDS 8
+#define R7XX_MAX_BACKENDS_MASK 0xff
+#define R7XX_MAX_SIMDS 16
+#define R7XX_MAX_SIMDS_MASK 0xffff
+#define R7XX_MAX_PIPES 8
+#define R7XX_MAX_PIPES_MASK 0xff
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV(x) ((x) << 16)
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_SW_HILEN(x) ((x) << 0)
+# define UPLL_SW_LOLEN(x) ((x) << 4)
+# define UPLL_SW_HILEN2(x) ((x) << 8)
+# define UPLL_SW_LOLEN2(x) ((x) << 12)
+# define UPLL_SW_MASK 0x0000FFFF
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+
+/* Registers */
+#define CB_COLOR0_BASE 0x28040
+#define CB_COLOR1_BASE 0x28044
+#define CB_COLOR2_BASE 0x28048
+#define CB_COLOR3_BASE 0x2804C
+#define CB_COLOR4_BASE 0x28050
+#define CB_COLOR5_BASE 0x28054
+#define CB_COLOR6_BASE 0x28058
+#define CB_COLOR7_BASE 0x2805C
+#define CB_COLOR7_FRAG 0x280FC
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1<<28)
+#define CP_PFP_HALT (1<<26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+
+#define DB_DEBUG3 0x98B0
+#define DB_CLK_OFF_DELAY(x) ((x) << 11)
+#define DB_DEBUG4 0x9B8C
+#define DISABLE_TILE_COVERED_FOR_PS_ITER (1 << 6)
+
+#define DCP_TILING_CONFIG 0x6CA0
+#define PIPE_TILING(x) ((x) << 1)
+#define BANK_TILING(x) ((x) << 4)
+#define GROUP_SIZE(x) ((x) << 6)
+#define ROW_TILING(x) ((x) << 8)
+#define BANK_SWAPS(x) ((x) << 11)
+#define SAMPLE_SPLIT(x) ((x) << 14)
+#define BACKEND_MAP(x) ((x) << 16)
+
+#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
+
+#define DMA_TILING_CONFIG 0x3ec8
+#define DMA_TILING_CONFIG2 0xd0b8
+
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG 0xef40
+#define UVD_UDEC_DB_TILING_CONFIG 0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1<<0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define GUI_ACTIVE (1<<31)
+#define GRBM_STATUS2 0x8014
+
+#define CG_CLKPIN_CNTL 0x660
+# define MUX_TCLK_TO_XCLK (1 << 8)
+# define XTALIN_DIVIDE (1 << 9)
+
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x3FF0000
+#define ASIC_T_SHIFT 16
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x)<<0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x)<<16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_SC_MODE_CNTL 0x28A4C
+#define PA_SC_MULTI_CHIP_CNTL 0x8B20
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_SAR_CTL0 0xA008
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define CACHE_DEPTH(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define DISABLE_INTERP_1 (1 << 5)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define DX9_CONSTS (1 << 2)
+#define ALU_INST_PREFER_VECTOR (1 << 3)
+#define DX10_CLAMP (1 << 4)
+#define CLAUSE_SEQ_PRIO(x) ((x) << 8)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define SQ_DYN_GPR_SIZE_SIMD_AB_0 0x8DB0
+#define SIMDA_RING0(x) ((x)<<0)
+#define SIMDA_RING1(x) ((x)<<8)
+#define SIMDB_RING0(x) ((x)<<16)
+#define SIMDB_RING1(x) ((x)<<24)
+#define SQ_DYN_GPR_SIZE_SIMD_AB_1 0x8DB4
+#define SQ_DYN_GPR_SIZE_SIMD_AB_2 0x8DB8
+#define SQ_DYN_GPR_SIZE_SIMD_AB_3 0x8DBC
+#define SQ_DYN_GPR_SIZE_SIMD_AB_4 0x8DC0
+#define SQ_DYN_GPR_SIZE_SIMD_AB_5 0x8DC4
+#define SQ_DYN_GPR_SIZE_SIMD_AB_6 0x8DC8
+#define SQ_DYN_GPR_SIZE_SIMD_AB_7 0x8DCC
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define DYN_GPR_ENABLE (1 << 27)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C10
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C14
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C0C
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+#define BILINEAR_PRECISION_6_BIT (0 << 31)
+#define BILINEAR_PRECISION_8_BIT (1 << 31)
+
+#define TCP_CNTL 0x9610
+#define TCP_CHAN_STEER 0x9614
+
+#define VC_ENHANCE 0x9714
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x)<<0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_ES_PER_GS 0x88CC
+#define VGT_GS_PER_ES 0x88C8
+#define VGT_GS_PER_VS 0x88E8
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_STRMOUT_EN 0x28AB0
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+/* async DMA */
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFF) << 0))
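+/* e.g. DMA_PACKET(DMA_PACKET_COPY, 0, 0, 0xFFFF) expands to 0x3000FFFF
+ * (illustrative expansion)
+ */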
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+
+#define SRBM_STATUS 0x0E50
+
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL 0x7400
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+#define HDMI_STATUS 0x7404
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x740c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL 0x7410
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7414
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7418
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7428
+# define HDMI_GC_AVMUTE (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7454
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7458
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x745c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7460
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7464
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7468
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x746c
+#define AFMT_GENERIC0_0 0x7470
+#define AFMT_GENERIC0_1 0x7474
+#define AFMT_GENERIC0_2 0x7478
+#define AFMT_GENERIC0_3 0x747c
+#define AFMT_GENERIC0_4 0x7480
+#define AFMT_GENERIC0_5 0x7484
+#define AFMT_GENERIC0_6 0x7488
+#define AFMT_GENERIC1_HDR 0x748c
+#define AFMT_GENERIC1_0 0x7490
+#define AFMT_GENERIC1_1 0x7494
+#define AFMT_GENERIC1_2 0x7498
+#define AFMT_GENERIC1_3 0x749c
+#define AFMT_GENERIC1_4 0x74a0
+#define AFMT_GENERIC1_5 0x74a4
+#define AFMT_GENERIC1_6 0x74a8
+#define HDMI_ACR_32_0 0x74ac
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x74b0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x74b4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x74b8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x74bc
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x74c0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x74c4
+#define HDMI_ACR_STATUS_1 0x74c8
+#define AFMT_AUDIO_INFO0 0x74cc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1 0x74d0
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0 0x74d4
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x74d8
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x74dc
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x74e0
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x74e4
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x74e8
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x74ec
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x74f0
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7608
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x760c
+#       define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0 (0x7400 - 0x7400)
+#define HDMI_OFFSET1 (0x7800 - 0x7400)
+
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
+# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
+# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+# define AUDIO_ENABLED (1 << 31)
+
+
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
+/* UVD */
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_VCPU_CHIP_ID 0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
+#define UVD_VCPU_CACHE_SIZE0 0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
+#define UVD_VCPU_CACHE_SIZE1 0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
+#define UVD_VCPU_CACHE_SIZE2 0xf4ec
+#define UVD_LMI_ADDR_EXT 0xf594
+
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
new file mode 100644
index 0000000..f054a3b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si.c
@@ -0,0 +1,5771 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "sid.h"
+#include "atom.h"
+#include "si_blit_shaders.h"
+
+#define SI_PFP_UCODE_SIZE 2144
+#define SI_PM4_UCODE_SIZE 2144
+#define SI_CE_UCODE_SIZE 2144
+#define SI_RLC_UCODE_SIZE 2048
+#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
+
+MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
+MODULE_FIRMWARE("radeon/TAHITI_me.bin");
+MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
+MODULE_FIRMWARE("radeon/VERDE_me.bin");
+MODULE_FIRMWARE("radeon/VERDE_ce.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
+
+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
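+/* golden register tables: triplets of { register offset, and-mask, value };
+ * presumably each register is read, masked and or'd with the value at init
+ * time (layout inferred from how similar tables are consumed elsewhere).
+ */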
+static const u32 tahiti_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4,
+ 0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x277c, 0x00000003, 0x000007ff,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x00000200, 0x000002fb,
+ 0xac10, 0xffffffff, 0x0000543b,
+ 0xac0c, 0xffffffff, 0xa9210876,
+ 0x88d0, 0xffffffff, 0x000fff40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x1410, 0x20000000, 0x20fffed8,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+ 0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601004,
+ 0xc47c, 0xffffffff, 0x10102020,
+ 0xc488, 0xffffffff, 0x01000020,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f7,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x32761054,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x033f1005,
+ 0xc47c, 0xffffffff, 0x10808020,
+ 0xc488, 0xffffffff, 0x00800008,
+ 0xc314, 0xffffffff, 0x00001000,
+ 0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x00000082,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f3,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00003210,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd8c0, 0xff000fff, 0x00000100,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x00000000,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x03e00000, 0x03600000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f1,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00003210,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+ 0x98f8, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x91d8, 0xffffffff, 0x00070006,
+ 0x91dc, 0xffffffff, 0x00090008,
+ 0x91e0, 0xffffffff, 0x0000000c,
+ 0x91e4, 0xffffffff, 0x000b000a,
+ 0x91e8, 0xffffffff, 0x000e000d,
+ 0x91ec, 0xffffffff, 0x00080007,
+ 0x91f0, 0xffffffff, 0x000a0009,
+ 0x91f4, 0xffffffff, 0x0000000d,
+ 0x91f8, 0xffffffff, 0x000c000b,
+ 0x91fc, 0xffffffff, 0x000f000e,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9264, 0xffffffff, 0x000e000d,
+ 0x9268, 0xffffffff, 0x0010000f,
+ 0x926c, 0xffffffff, 0x00000013,
+ 0x9270, 0xffffffff, 0x00120011,
+ 0x9274, 0xffffffff, 0x00150014,
+ 0x9278, 0xffffffff, 0x000f000e,
+ 0x927c, 0xffffffff, 0x00110010,
+ 0x9280, 0xffffffff, 0x00000014,
+ 0x9284, 0xffffffff, 0x00130012,
+ 0x9288, 0xffffffff, 0x00160015,
+ 0x928c, 0xffffffff, 0x0010000f,
+ 0x9290, 0xffffffff, 0x00120011,
+ 0x9294, 0xffffffff, 0x00000015,
+ 0x9298, 0xffffffff, 0x00140013,
+ 0x929c, 0xffffffff, 0x00170016,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_pg_init[] =
+{
+ 0x353c, 0xffffffff, 0x40000,
+ 0x3538, 0xffffffff, 0x200010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x7007,
+ 0x3538, 0xffffffff, 0x300010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x400000,
+ 0x3538, 0xffffffff, 0x100010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x120200,
+ 0x3538, 0xffffffff, 0x500010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x1e1e16,
+ 0x3538, 0xffffffff, 0x600010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x171f1e,
+ 0x3538, 0xffffffff, 0x700010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x3538, 0xffffffff, 0x9ff,
+ 0x3500, 0xffffffff, 0x0,
+ 0x3504, 0xffffffff, 0x10000800,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3500, 0xffffffff, 0x4,
+ 0x3504, 0xffffffff, 0x1000051e,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3500, 0xffffffff, 0x8,
+ 0x3504, 0xffffffff, 0x80500,
+ 0x3500, 0xffffffff, 0x12,
+ 0x3504, 0xffffffff, 0x9050c,
+ 0x3500, 0xffffffff, 0x1d,
+ 0x3504, 0xffffffff, 0xb052c,
+ 0x3500, 0xffffffff, 0x2a,
+ 0x3504, 0xffffffff, 0x1053e,
+ 0x3500, 0xffffffff, 0x2d,
+ 0x3504, 0xffffffff, 0x10546,
+ 0x3500, 0xffffffff, 0x30,
+ 0x3504, 0xffffffff, 0xa054e,
+ 0x3500, 0xffffffff, 0x3c,
+ 0x3504, 0xffffffff, 0x1055f,
+ 0x3500, 0xffffffff, 0x3f,
+ 0x3504, 0xffffffff, 0x10567,
+ 0x3500, 0xffffffff, 0x42,
+ 0x3504, 0xffffffff, 0x1056f,
+ 0x3500, 0xffffffff, 0x45,
+ 0x3504, 0xffffffff, 0x10572,
+ 0x3500, 0xffffffff, 0x48,
+ 0x3504, 0xffffffff, 0x20575,
+ 0x3500, 0xffffffff, 0x4c,
+ 0x3504, 0xffffffff, 0x190801,
+ 0x3500, 0xffffffff, 0x67,
+ 0x3504, 0xffffffff, 0x1082a,
+ 0x3500, 0xffffffff, 0x6a,
+ 0x3504, 0xffffffff, 0x1b082d,
+ 0x3500, 0xffffffff, 0x87,
+ 0x3504, 0xffffffff, 0x310851,
+ 0x3500, 0xffffffff, 0xba,
+ 0x3504, 0xffffffff, 0x891,
+ 0x3500, 0xffffffff, 0xbc,
+ 0x3504, 0xffffffff, 0x893,
+ 0x3500, 0xffffffff, 0xbe,
+ 0x3504, 0xffffffff, 0x20895,
+ 0x3500, 0xffffffff, 0xc2,
+ 0x3504, 0xffffffff, 0x20899,
+ 0x3500, 0xffffffff, 0xc6,
+ 0x3504, 0xffffffff, 0x2089d,
+ 0x3500, 0xffffffff, 0xca,
+ 0x3504, 0xffffffff, 0x8a1,
+ 0x3500, 0xffffffff, 0xcc,
+ 0x3504, 0xffffffff, 0x8a3,
+ 0x3500, 0xffffffff, 0xce,
+ 0x3504, 0xffffffff, 0x308a5,
+ 0x3500, 0xffffffff, 0xd3,
+ 0x3504, 0xffffffff, 0x6d08cd,
+ 0x3500, 0xffffffff, 0x142,
+ 0x3504, 0xffffffff, 0x2000095a,
+ 0x3504, 0xffffffff, 0x1,
+ 0x3500, 0xffffffff, 0x144,
+ 0x3504, 0xffffffff, 0x301f095b,
+ 0x3500, 0xffffffff, 0x165,
+ 0x3504, 0xffffffff, 0xc094d,
+ 0x3500, 0xffffffff, 0x173,
+ 0x3504, 0xffffffff, 0xf096d,
+ 0x3500, 0xffffffff, 0x184,
+ 0x3504, 0xffffffff, 0x15097f,
+ 0x3500, 0xffffffff, 0x19b,
+ 0x3504, 0xffffffff, 0xc0998,
+ 0x3500, 0xffffffff, 0x1a9,
+ 0x3504, 0xffffffff, 0x409a7,
+ 0x3500, 0xffffffff, 0x1af,
+ 0x3504, 0xffffffff, 0xcdc,
+ 0x3500, 0xffffffff, 0x1b1,
+ 0x3504, 0xffffffff, 0x800,
+ 0x3508, 0xffffffff, 0x6c9b2000,
+ 0x3510, 0xfc00, 0x2000,
+ 0x3544, 0xffffffff, 0xfc0,
+ 0x28d4, 0x00000100, 0x100
+};
+
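+/*
+ * Each table above is a flat list of {offset, and_mask, or_mask} triplets
+ * consumed by radeon_program_register_sequence().  A minimal sketch of the
+ * consumer's semantics (illustrative and compiled out; names here are not
+ * the driver's, the real helper lives elsewhere in the driver):
+ */
+#if 0
+static void program_register_triplets(struct radeon_device *rdev,
+				      const u32 *regs, u32 array_size)
+{
+	u32 i, tmp;
+
+	for (i = 0; i + 2 < array_size; i += 3) {
+		u32 reg = regs[i], and_mask = regs[i + 1], or_mask = regs[i + 2];
+
+		if (and_mask == 0xffffffff)	/* full-register write */
+			tmp = or_mask;
+		else				/* read-modify-write */
+			tmp = (RREG32(reg) & ~and_mask) | or_mask;
+		WREG32(reg, tmp);
+	}
+}
+#endif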
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers2,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ break;
+ case CHIP_PITCAIRN:
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
+ case CHIP_VERDE:
+ radeon_program_register_sequence(rdev,
+ verde_golden_registers,
+ (const u32)ARRAY_SIZE(verde_golden_registers));
+ radeon_program_register_sequence(rdev,
+ verde_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ verde_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ verde_pg_init,
+ (const u32)ARRAY_SIZE(verde_pg_init));
+ break;
+ case CHIP_OLAND:
+ radeon_program_register_sequence(rdev,
+ oland_golden_registers,
+ (const u32)ARRAY_SIZE(oland_golden_registers));
+ radeon_program_register_sequence(rdev,
+ oland_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
+ case CHIP_HAINAN:
+ radeon_program_register_sequence(rdev,
+ hainan_golden_registers,
+ (const u32)ARRAY_SIZE(hainan_golden_registers));
+ radeon_program_register_sequence(rdev,
+ hainan_golden_registers2,
+ (const u32)ARRAY_SIZE(hainan_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ hainan_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ break;
+ default:
+ break;
+ }
+}
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
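+/*
+ * Note: clock values here are in 10 kHz units, as elsewhere in the driver,
+ * so TCLK above is 10 MHz (PCIE_BUS_CLK = 100 MHz / 10).  Worked example
+ * with a hypothetical 27 MHz crystal (reference_freq = 2700) and
+ * XTALIN_DIVIDE set: si_get_xclk() returns 2700 / 4 = 675, i.e. 6.75 MHz.
+ */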
+
+/* get temperature in millidegrees */
+int si_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+	actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
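+/*
+ * Worked examples for the decode above (hypothetical readings):
+ *   CTF field 0x045: 0x45 & 0x1ff = 69, returns 69000 (69 C)
+ *   CTF field with bit 9 set: clamped to 255, returns 255000
+ */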
+
+#define TAHITI_IO_MC_REGS_SIZE 36
+
+static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a77400}
+};
+
+static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a47400}
+};
+
+static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a37400}
+};
+
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a17730}
+};
+
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a07730}
+};
+
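+/*
+ * Each *_io_mc_regs table above is a list of {index, data} pairs written
+ * through the MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA register pair by
+ * si_mc_load_microcode() below before the MC ucode itself is streamed in.
+ */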
+/* ucode loading */
+static int si_mc_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+	u32 running;
+	const u32 *io_mc_regs;
+ int i, ucode_size, regs_size;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+		io_mc_regs = (const u32 *)&tahiti_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+		io_mc_regs = (const u32 *)&pitcairn_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+		io_mc_regs = (const u32 *)&verde_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+		io_mc_regs = (const u32 *)&oland_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+		io_mc_regs = (const u32 *)&hainan_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < regs_size; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
+ /* load the MC ucode */
+ fw_data = (const __be32 *)rdev->mc_fw->data;
+ for (i = 0; i < ucode_size; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+ break;
+ udelay(1);
+ }
+	}
+
+ return 0;
+}
+
+static int si_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	if (IS_ERR(pdev)) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware loader device\n");
+		return -EINVAL;
+	}
+
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ chip_name = "TAHITI";
+ rlc_chip_name = "TAHITI";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_PITCAIRN:
+ chip_name = "PITCAIRN";
+ rlc_chip_name = "PITCAIRN";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_VERDE:
+ chip_name = "VERDE";
+ rlc_chip_name = "VERDE";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = SI_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_OLAND:
+ chip_name = "OLAND";
+ rlc_chip_name = "OLAND";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_HAINAN:
+ chip_name = "HAINAN";
+ rlc_chip_name = "HAINAN";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
+	default:
+		BUG();
+ }
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mc_fw->size != mc_req_size) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "si_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->ce_fw);
+ rdev->ce_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ release_firmware(rdev->mc_fw);
+ rdev->mc_fw = NULL;
+ }
+ return err;
+}
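+/*
+ * For reference, the firmware files requested above follow the pattern
+ * radeon/<CHIP>_{pfp,me,ce,rlc,mc}.bin, e.g. radeon/VERDE_pfp.bin,
+ * radeon/VERDE_me.bin, radeon/VERDE_ce.bin, radeon/VERDE_rlc.bin and
+ * radeon/VERDE_mc.bin for Cape Verde.
+ */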
+
+/* watermark setup */
+static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20 (only the two used here
+	 * are listed):
+ * 0 - half lb
+ * 2 - whole lb, other crtc must be disabled
+ */
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode) {
+ tmp = 0; /* 1/2 */
+ buffer_alloc = 1;
+ } else {
+ tmp = 2; /* whole */
+ buffer_alloc = 2;
+ }
+ } else {
+ tmp = 0;
+ buffer_alloc = 0;
+ }
+
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
+ DC_LB_MEMORY_CONFIG(tmp));
+
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ default:
+ return 4096 * 2;
+ case 2:
+ return 8192 * 2;
+ }
+ }
+
+ /* controller not enabled, so no lb used */
+ return 0;
+}
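+/*
+ * The value returned above is the line-buffer allocation granted to the
+ * crtc (half = 4096 * 2, whole = 8192 * 2, in pixels judging by its use in
+ * dce6_check_latency_hiding(), which divides it by the source width).
+ */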
+
+static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 3;
+ case 5:
+ return 6;
+ case 6:
+ return 10;
+ case 7:
+ return 12;
+ case 8:
+ return 16;
+ }
+}
+
+struct dce6_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate raw DRAM Bandwidth */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
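+/*
+ * In plain integer terms the fixed-point math above computes
+ * MB/s = (yclk / 1000) * (dram_channels * 4 bytes) * 0.7.  A minimal
+ * sketch with hypothetical numbers (illustrative, compiled out):
+ */
+#if 0
+static u32 dce6_dram_bandwidth_example(u32 yclk_khz, u32 channels)
+{
+	/* e.g. 2 channels at 5000000 kHz -> 5000 * 8 * 7 / 10 = 28000 MB/s */
+	return (yclk_khz / 1000) * (channels * 4) * 7 / 10;
+}
+#endif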
+
+static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
+{
+ return 32;
+}
+
+static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, sclk, bandwidth;
+ fixed20_12 a, b1, b2;
+ u32 min_bandwidth;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
+ b1.full = dfixed_mul(a, disp_clk);
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
+ b2.full = dfixed_mul(a, sclk);
+
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+ min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
+
+ a.full = dfixed_const(min_bandwidth);
+ bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the available bandwidth.  Display can use this temporarily but not on average. */
+ u32 dram_bandwidth = dce6_dram_bandwidth(wm);
+ u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
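+/*
+ * I.e. average bandwidth (MB/s) = src_width * bytes_per_pixel * vsc
+ * / line_time (us).  Hypothetical 1080p example: the caller truncates
+ * pixel_period to 1000000 / 148500 = 6 ns, so a 2200-pixel line is
+ * 13.2 us, and 1920 * 4 bytes over that line is roughly 580 MB/s.
+ */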
+
+static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = dce6_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ u32 tmp, dmif_size = 12288;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(mc_latency + 512);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(b, c);
+
+ c.full = dfixed_const(dmif_size);
+ b.full = dfixed_div(c, b);
+
+ tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+}
+
+static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	return dce6_average_bandwidth(wm) <=
+		(dce6_dram_bandwidth_for_display(wm) / wm->num_heads);
+}
+
+static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	return dce6_average_bandwidth(wm) <=
+		(dce6_available_bandwidth(wm) / wm->num_heads);
+}
+
+static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	return dce6_latency_watermark(wm) <= latency_hiding;
+}
+
+static void dce6_program_watermarks(struct radeon_device *rdev,
+ struct radeon_crtc *radeon_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &radeon_crtc->base.mode;
+ struct dce6_wm_params wm;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (radeon_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ if (rdev->family == CHIP_ARUBA)
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ else
+ wm.dram_channels = si_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
+ /* set for low clocks */
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !dce6_check_latency_hiding(&wm) ||
+ (rdev->disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, radeon_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+}
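+/*
+ * The priority mark computed above is the number of pixels the head
+ * fetches during one watermark latency period, in 16-pixel units:
+ * mark = latency (us) * pixel clock (MHz) * hsc / 16.
+ */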
+
+void dce6_bandwidth_update(struct radeon_device *rdev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ radeon_update_display_priority(rdev);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
+ for (i = 0; i < rdev->num_crtc; i += 2) {
+ mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+ mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+ dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+ dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
+}
+
+/*
+ * Core functions
+ */
+static void si_tiling_mode_table_init(struct radeon_device *rdev)
+{
+ const u32 num_tile_mode_states = 32;
+ u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+ switch (rdev->config.si.mem_row_size_in_kb) {
+ case 1:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+ break;
+ case 2:
+ default:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+ break;
+ case 4:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+ break;
+ }
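+	/*
+	 * split_equal_to_row_size maps the board's DRAM row size (1/2/4 KB)
+	 * to the matching TILE_SPLIT token, used below for the tile modes
+	 * whose split should equal the row size.
+	 */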
+
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN)) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0: /* non-AA compressed depth or any compressed stencil */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 1: /* 2xAA/4xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 2: /* 8xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 8: /* 1D and 1D Array Surfaces */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9: /* Displayable maps. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10: /* Display 8bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 11: /* Display 16bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12: /* Display 32bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 13: /* Thin. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14: /* Thin 8 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 15: /* Thin 16 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 16: /* Thin 32 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 17: /* Thin 64 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 21: /* 8 bpp PRT. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22: /* 16 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23: /* 32 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24: /* 64 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25: /* 128 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ } else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0: /* non-AA compressed depth or any compressed stencil */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 1: /* 2xAA/4xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 2: /* 8xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 8: /* 1D and 1D Array Surfaces */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9: /* Displayable maps. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10: /* Display 8bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 11: /* Display 16bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12: /* Display 32bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 13: /* Thin. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14: /* Thin 8 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 15: /* Thin 16 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 16: /* Thin 32 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 17: /* Thin 64 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 21: /* 8 bpp PRT. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22: /* 16 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23: /* 32 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24: /* 64 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25: /* 128 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ }
+ } else
+ DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+}
+
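+/*
+ * si_select_se_sh - route register access to one shader engine/array.
+ * Writes GRBM_GFX_INDEX so subsequent per-SE/SH register accesses hit the
+ * given shader engine (se_num) and shader array (sh_num); passing
+ * 0xffffffff for either selects broadcast to all instances.
+ */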
+static void si_select_se_sh(struct radeon_device *rdev,
+ u32 se_num, u32 sh_num)
+{
+ u32 data = INSTANCE_BROADCAST_WRITES;
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
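+/*
+ * si_create_bitmask - build a mask with the low @bit_width bits set,
+ * e.g. bit_width = 3 yields 0x7.  For bit_width < 32 this is equivalent
+ * to ((1u << bit_width) - 1); the loop form also covers bit_width == 32.
+ */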
+static u32 si_create_bitmask(u32 bit_width)
+{
+ u32 i, mask = 0;
+
+ for (i = 0; i < bit_width; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+ return mask;
+}
+
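+/*
+ * si_get_cu_enabled - bitmask of usable compute units in the current SH.
+ * ORs the hardware-fused (CC_GC_SHADER_ARRAY_CONFIG) and user-disabled
+ * (GC_USER_SHADER_ARRAY_CONFIG) inactive-CU fields, then inverts the
+ * result and masks it to cu_per_sh bits, so a set bit means the CU is
+ * enabled.
+ */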
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ if (data & 1)
+ data &= INACTIVE_CUS_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = si_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
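+/*
+ * si_setup_spi - per-SH SPI static thread management setup.
+ * For every SE/SH pair this clears the SPI_STATIC_THREAD_MGMT_3 bit of
+ * the first active CU it finds; this reading of the register semantics
+ * is inferred from the code below, not from documentation.
+ */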
+static void si_setup_spi(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask, active_cu;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+ for (k = 0; k < 16; k++) {
+ mask = 1 << k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
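+/*
+ * si_get_rb_disabled - bitmask of disabled render backends in the current
+ * SH, combining the hardware (CC_RB_BACKEND_DISABLE) and user
+ * (GC_USER_RB_BACKEND_DISABLE) disable fields, masked to the number of
+ * RBs per SH (max_rb_num / se_num / sh_per_se).
+ */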
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+ u32 max_rb_num, u32 se_num,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ if (data & 1)
+ data &= BACKEND_DISABLE_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+ return data & mask;
+}
+
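+/*
+ * si_setup_rb - program the raster configuration.
+ * Builds a GPU-wide bitmap of enabled RBs from the per-SH disable masks,
+ * then programs PA_SC_RASTER_CONFIG for each SE, consuming the enabled-RB
+ * bitmap two bits at a time to pick an RB mapping.
+ */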
+static void si_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ for (i = 0; i < se_num; i++) {
+ si_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static void si_gpu_init(struct radeon_device *rdev)
+{
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 sx_debug_1;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+ int i, j;
+
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ rdev->config.si.max_shader_engines = 2;
+ rdev->config.si.max_tile_pipes = 12;
+ rdev->config.si.max_cu_per_sh = 8;
+ rdev->config.si.max_sh_per_se = 2;
+ rdev->config.si.max_backends_per_se = 4;
+ rdev->config.si.max_texture_channel_caches = 12;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 32;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_PITCAIRN:
+ rdev->config.si.max_shader_engines = 2;
+ rdev->config.si.max_tile_pipes = 8;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 2;
+ rdev->config.si.max_backends_per_se = 4;
+ rdev->config.si.max_texture_channel_caches = 8;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 32;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_VERDE:
+ default:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 2;
+ rdev->config.si.max_backends_per_se = 4;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 32;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_OLAND:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 6;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 2;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_HAINAN:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 1;
+ rdev->config.si.max_texture_channel_caches = 2;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ evergreen_fix_pci_max_read_req_size(rdev);
+
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
+ rdev->config.si.mem_max_burst_length_bytes = 256;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (rdev->config.si.mem_row_size_in_kb > 4)
+ rdev->config.si.mem_row_size_in_kb = 4;
+ /* XXX use MC settings? */
+ rdev->config.si.shader_engine_tile_size = 32;
+ rdev->config.si.num_gpus = 1;
+ rdev->config.si.multi_gpu_tile_size = 64;
+
+ /* fix up row size */
+ gb_addr_config &= ~ROW_SIZE_MASK;
+ switch (rdev->config.si.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.si.tile_config = 0;
+ switch (rdev->config.si.num_tile_pipes) {
+ case 1:
+ rdev->config.si.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.si.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.si.tile_config |= (2 << 0);
+ break;
+ case 8:
+ default:
+ /* XXX what about 12? */
+ rdev->config.si.tile_config |= (3 << 0);
+ break;
+ }
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
+ rdev->config.si.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.si.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.si.tile_config |= 2 << 4;
+ break;
+ }
+ rdev->config.si.tile_config |=
+ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.si.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
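+ /* Hypothetical example (illustration only, not a real board value):
+ * 8 pipes (code 3), 16 banks (code 2), a pipe interleave field of 0
+ * and a 4 KB row (code 2) would pack to
+ * (3 << 0) | (2 << 4) | (0 << 8) | (2 << 12) = 0x2023.
+ */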
+
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ if (rdev->has_uvd) {
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ }
+
+ si_tiling_mode_table_init(rdev);
+
+ si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_backends_per_se);
+
+ si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_cu_per_sh);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
+ SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_CONFIG, 0);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+ tmp = RREG32(HDP_MISC_CNTL);
+ tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+ WREG32(HDP_MISC_CNTL, tmp);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+/*
+ * GPU scratch register helper functions.
+ */
+static void si_scratch_init(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->scratch.num_reg = 7;
+ rdev->scratch.reg_base = SCRATCH_REG0;
+ for (i = 0; i < rdev->scratch.num_reg; i++) {
+ rdev->scratch.free[i] = true;
+ rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+ }
+}
+
+void si_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* flush read cache over gart */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+}
+
+/*
+ * IB stuff
+ */
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 header;
+
+ if (ib->is_const_ib) {
+ /* set switch buffer packet before const IB */
+ radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ radeon_ring_write(ring, 0);
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+ } else {
+ u32 next_rptr;
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (1 << 8));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ }
+
+ radeon_ring_write(ring, header);
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
+
+ if (!ib->is_const_ib) {
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ }
+}
+
+/*
+ * CP.
+ */
+static void si_cp_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable) {
+ WREG32(CP_ME_CNTL, 0);
+ } else {
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
+ udelay(50);
+}
+
+static int si_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+ return -EINVAL;
+
+ si_cp_enable(rdev, false);
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int si_cp_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r, i;
+
+ r = radeon_ring_lock(rdev, ring, 7 + 4);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ /* init the CP */
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+
+ /* init the CE partitions */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+ radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+ radeon_ring_write(ring, 0xc000);
+ radeon_ring_write(ring, 0xe000);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ si_cp_enable(rdev, true);
+
+ r = radeon_ring_lock(rdev, ring, si_default_size + 10);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < si_default_size; i++)
+ radeon_ring_write(ring, si_default_state[i]);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+ ring = &rdev->ring[i];
+ r = radeon_ring_lock(rdev, ring, 2);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* clear the compute context state */
+ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_unlock_commit(rdev, ring);
+ }
+
+ return 0;
+}
+
+static void si_cp_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ si_cp_enable(rdev, false);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int si_cp_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, 0);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+
+ if (rdev->wb.enabled) {
+ WREG32(SCRATCH_UMSK, 0xff);
+ } else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+ ring->rptr = RREG32(CP_RB0_RPTR);
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+ ring->rptr = RREG32(CP_RB1_RPTR);
+
+ /* ring2 - compute only */
+ /* Set ring buffer size */
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+ ring->rptr = RREG32(CP_RB2_RPTR);
+
+ /* start the rings */
+ si_cp_start(rdev);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
+ r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ if (r) {
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ return r;
+ }
+ r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ if (r) {
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ }
+ r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ if (r) {
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
+
+ return 0;
+}
+
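+/*
+ * si_gpu_check_soft_reset - translate hung/busy status into a reset mask.
+ * Reads the GRBM/SRBM/DMA/VM status registers and returns a mask of
+ * RADEON_RESET_* flags describing which blocks need a soft reset; zero
+ * means the GPU looks idle.
+ */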
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ BCI_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32(GRBM_STATUS2);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
+ return reset_mask;
+}
+
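+/*
+ * si_gpu_soft_reset - soft reset the blocks named in @reset_mask.
+ * Halts CP parsing and any affected DMA engine, stops the MC, pulses the
+ * matching GRBM/SRBM soft-reset bits, then restores the MC state.
+ */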
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ grbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ evergreen_mc_resume(rdev, &save);
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
+}
+
+int si_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ si_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* MC */
+static void si_mc_program(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ evergreen_mc_stop(rdev, &save);
+ if (radeon_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ if (!ASIC_IS_NODCE(rdev))
+ /* Lock out access through the VGA aperture */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ rdev->vram_scratch.gpu_addr >> 12);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ if (radeon_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ evergreen_mc_resume(rdev, &save);
+ if (!ASIC_IS_NODCE(rdev)) {
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it from overwriting our objects */
+ rv515_vga_render_disable(rdev);
+ }
+}
+
+static void si_vram_gtt_location(struct radeon_device *rdev,
+ struct radeon_mc *mc)
+{
+ if (mc->mc_vram_size > 0xFFC0000000ULL) {
+ /* leave room for at least 1024M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xFFC0000000ULL;
+ mc->mc_vram_size = 0xFFC0000000ULL;
+ }
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+}
+
+static int si_mc_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ /* size in MB on si */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ si_vram_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
+
+ return 0;
+}
+
+/*
+ * GART
+ */
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int si_pcie_gart_enable(struct radeon_device *rdev)
+{
+ int r, i;
+
+ if (rdev->gart.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+ WREG32(0x15D4, 0);
+ WREG32(0x15D8, 0);
+ WREG32(0x15DC, 0);
+
+ /* empty context1-15 */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application and set up and assigned
+ * on the fly in the vm part of radeon_gart.c
+ */
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->gart.table_addr >> 12);
+ else
+ WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
+ rdev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL2, 4);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+ si_pcie_gart_tlb_flush(rdev);
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(rdev->mc.gtt_size >> 20),
+ (unsigned long long)rdev->gart.table_addr);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+static void si_pcie_gart_disable(struct radeon_device *rdev)
+{
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ radeon_gart_table_vram_unpin(rdev);
+}
+
+static void si_pcie_gart_fini(struct radeon_device *rdev)
+{
+ si_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+/* vm parser */
+static bool si_vm_reg_valid(u32 reg)
+{
+ /* context regs are fine */
+ if (reg >= 0x28000)
+ return true;
+
+ /* check config regs */
+ switch (reg) {
+ case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case VGT_VTX_VECT_EJECT_REG:
+ case VGT_CACHE_INVALIDATION:
+ case VGT_ESGS_RING_SIZE:
+ case VGT_GSVS_RING_SIZE:
+ case VGT_GS_VERTEX_REUSE:
+ case VGT_PRIMITIVE_TYPE:
+ case VGT_INDEX_TYPE:
+ case VGT_NUM_INDICES:
+ case VGT_NUM_INSTANCES:
+ case VGT_TF_RING_SIZE:
+ case VGT_HS_OFFCHIP_PARAM:
+ case VGT_TF_MEMORY_BASE:
+ case PA_CL_ENHANCE:
+ case PA_SU_LINE_STIPPLE_VALUE:
+ case PA_SC_LINE_STIPPLE_STATE:
+ case PA_SC_ENHANCE:
+ case SQC_CACHES:
+ case SPI_STATIC_THREAD_MGMT_1:
+ case SPI_STATIC_THREAD_MGMT_2:
+ case SPI_STATIC_THREAD_MGMT_3:
+ case SPI_PS_MAX_WAVE_ID:
+ case SPI_CONFIG_CNTL:
+ case SPI_CONFIG_CNTL_1:
+ case TA_CNTL_AUX:
+ return true;
+ default:
+ DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+ return false;
+ }
+}
+
+static int si_vm_packet3_ce_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_SET_CE_DE_COUNTERS:
+ case PACKET3_LOAD_CONST_RAM:
+ case PACKET3_WRITE_CONST_RAM:
+ case PACKET3_WRITE_CONST_RAM_OFFSET:
+ case PACKET3_DUMP_CONST_RAM:
+ case PACKET3_INCREMENT_CE_COUNTER:
+ case PACKET3_WAIT_ON_DE_COUNTER:
+ case PACKET3_CE_WRITE:
+ break;
+ default:
+ DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+ u32 start_reg, reg, i;
+ u32 command = ib[idx + 4];
+ u32 info = ib[idx + 1];
+ u32 idx_value = ib[idx];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ int r;
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, end_reg, reg, i;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_INDEX_BUFFER_SIZE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_ALLOC_GDS:
+ case PACKET3_WRITE_GDS_RAM:
+ case PACKET3_ATOMIC_GDS:
+ case PACKET3_ATOMIC:
+ case PACKET3_OCCLUSION_QUERY:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ case PACKET3_INDEX_BASE:
+ case PACKET3_DRAW_INDEX_2:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_DRAW_INDIRECT_MULTI:
+ case PACKET3_DRAW_INDEX_AUTO:
+ case PACKET3_DRAW_INDEX_IMMD:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+ case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
+ case PACKET3_MPEG_INDEX:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_PFP_SYNC_ME:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_SH_REG:
+ case PACKET3_SET_SH_REG_OFFSET:
+ case PACKET3_INCREMENT_DE_COUNTER:
+ case PACKET3_WAIT_ON_CE_COUNTER:
+ case PACKET3_WAIT_ON_AVAIL_BUFFER:
+ case PACKET3_ME_WRITE:
+ break;
+ case PACKET3_COPY_DATA:
+ if ((idx_value & 0xf00) == 0) {
+ reg = ib[idx + 3] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_WRITE_DATA:
+ if ((idx_value & 0xf00) == 0) {
+ start_reg = ib[idx + 1] * 4;
+ if (idx_value & 0x10000) {
+ if (!si_vm_reg_valid(start_reg))
+ return -EINVAL;
+ } else {
+ for (i = 0; i < (pkt->count - 2); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CP_DMA:
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
+ break;
+ default:
+ DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int si_vm_packet3_compute_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ int r;
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, reg, i;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_ALLOC_GDS:
+ case PACKET3_WRITE_GDS_RAM:
+ case PACKET3_ATOMIC_GDS:
+ case PACKET3_ATOMIC:
+ case PACKET3_OCCLUSION_QUERY:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_PFP_SYNC_ME:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_SH_REG:
+ case PACKET3_SET_SH_REG_OFFSET:
+ case PACKET3_INCREMENT_DE_COUNTER:
+ case PACKET3_WAIT_ON_CE_COUNTER:
+ case PACKET3_WAIT_ON_AVAIL_BUFFER:
+ case PACKET3_ME_WRITE:
+ break;
+ case PACKET3_COPY_DATA:
+ if ((idx_value & 0xf00) == 0) {
+ reg = ib[idx + 3] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_WRITE_DATA:
+ if ((idx_value & 0xf00) == 0) {
+ start_reg = ib[idx + 1] * 4;
+ if (idx_value & 0x10000) {
+ if (!si_vm_reg_valid(start_reg))
+ return -EINVAL;
+ } else {
+ for (i = 0; i < (pkt->count - 2); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!si_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_CP_DMA:
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
+ break;
+ default:
+ DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int ret = 0;
+ u32 idx = 0;
+ struct radeon_cs_packet pkt;
+
+ do {
+ pkt.idx = idx;
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.one_reg_wr = 0;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ dev_err(rdev->dev, "Packet0 not allowed!\n");
+ ret = -EINVAL;
+ break;
+ case RADEON_PACKET_TYPE2:
+ idx += 1;
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ if (ib->is_const_ib)
+ ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
+ else {
+ switch (ib->ring) {
+ case RADEON_RING_TYPE_GFX_INDEX:
+ ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
+ break;
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
+ break;
+ default:
+ dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ idx += pkt.count + 2;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ } while (idx < ib->length_dw);
+
+ return ret;
+}
+
+/*
+ * vm
+ */
+int si_vm_init(struct radeon_device *rdev)
+{
+ /* number of VMs */
+ rdev->vm_manager.nvm = 16;
+ /* base offset of vram pages */
+ rdev->vm_manager.vram_base_offset = 0;
+
+ return 0;
+}
+
+void si_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (SI).
+ */
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+ ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1));
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+ }
+}
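+
+/*
+ * Illustrative sketch (not part of the driver): in the CP path above,
+ * ndw is capped at 0x3FFE dwords, and each page table entry costs two
+ * dwords on top of the two dwords of destination address, so a single
+ * WRITE_DATA packet can update at most (0x3FFE - 2) / 2 = 8190 PTEs:
+ *
+ *	unsigned max_ptes_per_packet = (0x3FFE - 2) / 2;
+ *	unsigned packets_needed = DIV_ROUND_UP(count, max_ptes_per_packet);
+ */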
+
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ /* write new base address */
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+
+ if (vm->id < 8) {
+ radeon_ring_write(ring,
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ } else {
+ radeon_ring_write(ring,
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ }
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0x1);
+
+	/* bits 0-15 are the VM contexts 0-15 */
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0)));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
+}
+
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+	/* bits 0-15 are the VM contexts 0-15 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
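+
+/*
+ * Note (illustrative, based on the usage above): in the SRBM_WRITE
+ * packets, the second dword appears to pack a byte-enable mask in bits
+ * 16-19 (0xf enables all four bytes) together with the dword-aligned
+ * register offset, e.g.:
+ *
+ *	u32 srbm_dw1 = (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2);
+ */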
+
+/*
+ * RLC
+ */
+void si_rlc_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj) {
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.save_restore_obj);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+
+ radeon_bo_unref(&rdev->rlc.save_restore_obj);
+ rdev->rlc.save_restore_obj = NULL;
+ }
+
+ /* clear state block */
+ if (rdev->rlc.clear_state_obj) {
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.clear_state_obj);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+
+ radeon_bo_unref(&rdev->rlc.clear_state_obj);
+ rdev->rlc.clear_state_obj = NULL;
+ }
+}
+
+int si_rlc_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ si_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.save_restore_gpu_addr);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
+ }
+
+ /* clear state block */
+ if (rdev->rlc.clear_state_obj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ si_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.clear_state_gpu_addr);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void si_rlc_stop(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, 0);
+}
+
+static void si_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int si_rlc_resume(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ si_rlc_stop(rdev);
+
+ WREG32(RLC_RL_BASE, 0);
+ WREG32(RLC_RL_SIZE, 0);
+ WREG32(RLC_LB_CNTL, 0);
+ WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+ WREG32(RLC_LB_CNTR_INIT, 0);
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ si_rlc_start(rdev);
+
+ return 0;
+}
+
+static void si_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+static void si_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.rptr = 0;
+}
+
+static void si_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING1, 0);
+ WREG32(CP_INT_CNTL_RING2, 0);
+ tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ WREG32(GRBM_INT_CNTL, 0);
+ if (rdev->num_crtc >= 2) {
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
+
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+static int si_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ si_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = si_rlc_resume(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+
+ if (rdev->wb.enabled)
+ ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+ /* set the writeback address whether it's enabled or not */
+ WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ si_disable_interrupt_state(rdev);
+
+ pci_set_master(rdev->pdev);
+
+ /* enable irqs */
+ si_enable_interrupts(rdev);
+
+ return ret;
+}
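+
+/*
+ * Illustrative only: si_init() below sets up a 64 KB IH ring, i.e.
+ * 16384 dwords, so rb_bufsz = drm_order(16384) = 14 here and the ring
+ * size field of IH_RB_CNTL is programmed as (14 << 1) by the code above.
+ */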
+
+int si_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 dma_cntl, dma_cntl1;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ si_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ si_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+
+ dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+ dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
+ /* enable CP interrupts on all rings */
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int gfx\n");
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int cp1\n");
+ cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int cp2\n");
+ cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma\n");
+ dma_cntl |= TRAP_ENABLE;
+ }
+
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+ DRM_DEBUG("si_irq_set: sw int dma1\n");
+ dma_cntl1 |= TRAP_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0] ||
+ atomic_read(&rdev->irq.pflip[0])) {
+ DRM_DEBUG("si_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1] ||
+ atomic_read(&rdev->irq.pflip[1])) {
+ DRM_DEBUG("si_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2] ||
+ atomic_read(&rdev->irq.pflip[2])) {
+ DRM_DEBUG("si_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3] ||
+ atomic_read(&rdev->irq.pflip[3])) {
+ DRM_DEBUG("si_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4] ||
+ atomic_read(&rdev->irq.pflip[4])) {
+ DRM_DEBUG("si_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5] ||
+ atomic_read(&rdev->irq.pflip[5])) {
+ DRM_DEBUG("si_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("si_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("si_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("si_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("si_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("si_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("si_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
+ WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ if (rdev->num_crtc >= 2) {
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ }
+
+ if (!ASIC_IS_NODCE(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ }
+
+ return 0;
+}
+
+static inline void si_irq_ack(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (ASIC_IS_NODCE(rdev))
+ return;
+
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
+
+ if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+static void si_irq_disable(struct radeon_device *rdev)
+{
+ si_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ si_irq_ack(rdev);
+ si_disable_interrupt_state(rdev);
+}
+
+static void si_irq_suspend(struct radeon_device *rdev)
+{
+ si_irq_disable(rdev);
+ si_rlc_stop(rdev);
+}
+
+static void si_irq_fini(struct radeon_device *rdev)
+{
+ si_irq_suspend(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ if (rdev->wb.enabled)
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+ else
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not-overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
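+
+/*
+ * Sketch (assuming the 64 KB IH ring set up in si_init(), where
+ * ptr_mask is ring_size - 1): ring pointers are byte offsets and each
+ * IV entry is 16 bytes, so skipping the last overwritten vector after
+ * an overflow is a simple add-and-mask:
+ *
+ *	u32 ptr_mask = (64 * 1024) - 1;
+ *	u32 new_rptr = (wptr + 16) & ptr_mask;
+ */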
+
+/* SI IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [63:60] - reserved
+ * [71:64] - RINGID
+ * [79:72] - VMID
+ * [127:80] - reserved
+ */
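+/*
+ * Illustrative decode of a single IV entry (four little-endian dwords),
+ * matching the layout above and the parsing loop below:
+ *
+ *	u32 dw0 = le32_to_cpu(rdev->ih.ring[ring_index + 0]);
+ *	u32 dw1 = le32_to_cpu(rdev->ih.ring[ring_index + 1]);
+ *	u32 dw2 = le32_to_cpu(rdev->ih.ring[ring_index + 2]);
+ *	u32 src_id   = dw0 & 0xff;	// bits [7:0]
+ *	u32 src_data = dw1 & 0xfffffff;	// bits [59:32]
+ *	u32 ring_id  = dw2 & 0xff;	// bits [71:64]
+ */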
+int si_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr;
+ u32 rptr;
+ u32 src_id, src_data, ring_id;
+ u32 ring_index;
+ bool queue_hotplug = false;
+
+ if (!rdev->ih.enabled || rdev->shutdown)
+ return IRQ_NONE;
+
+ wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
+ /* display interrupts */
+ si_irq_ack(rdev);
+
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+ src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+ ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 146:
+ case 147:
+ dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+ break;
+ case 176: /* RINGID0 CP_INT */
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 177: /* RINGID1 CP_INT */
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 178: /* RINGID2 CP_INT */
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ switch (ring_id) {
+ case 0:
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 2:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ }
+ break;
+ case 224: /* DMA trap event */
+ DRM_DEBUG("IH: DMA trap\n");
+ radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
+	case 244: /* DMA1 trap event */
+ DRM_DEBUG("IH: DMA1 trap\n");
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ if (queue_hotplug)
+ schedule_work(&rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = si_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
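+
+/*
+ * Usage sketch (hypothetical caller, for illustration only; the source
+ * and destination GPU addresses and the size are assumptions):
+ *
+ *	struct radeon_fence *fence = NULL;
+ *	int r = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
+ *			    size >> RADEON_GPU_PAGE_SHIFT, &fence);
+ *	if (!r)
+ *		r = radeon_fence_wait(fence, false);
+ *	radeon_fence_unref(&fence);
+ */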
+
+/*
+ * startup/shutdown callbacks
+ */
+static int si_startup(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ si_mc_program(rdev);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->rlc_fw || !rdev->mc_fw) {
+ r = si_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
+ r = si_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ r = si_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ si_gpu_init(rdev);
+
+ /* allocate rlc buffers */
+ r = si_rlc_init(rdev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
+ }
+
+ if (rdev->has_uvd) {
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ }
+
+ /* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
+ r = si_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ si_irq_set(rdev);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+ CP_RB1_RPTR, CP_RB1_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+ CP_RB2_RPTR, CP_RB2_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ if (r)
+ return r;
+
+ r = si_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = si_cp_resume(rdev);
+ if (r)
+ return r;
+
+ r = cayman_dma_resume(rdev);
+ if (r)
+ return r;
+
+ if (rdev->has_uvd) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+ }
+
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+int si_resume(struct radeon_device *rdev)
+{
+ int r;
+
+	/* Do not reset the GPU before posting; unlike on r500 hardware,
+	 * posting on rv770 and newer hardware performs the tasks necessary
+	 * to bring the GPU back into good shape.
+	 */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ /* init golden registers */
+ si_init_golden_registers(rdev);
+
+ rdev->accel_working = true;
+ r = si_startup(rdev);
+ if (r) {
+ DRM_ERROR("si startup failed on resume\n");
+ rdev->accel_working = false;
+ return r;
+ }
+
+	return r;
+}
+
+int si_suspend(struct radeon_device *rdev)
+{
+ radeon_vm_manager_fini(rdev);
+ si_cp_enable(rdev, false);
+ cayman_dma_stop(rdev);
+ if (rdev->has_uvd) {
+ r600_uvd_stop(rdev);
+ radeon_uvd_suspend(rdev);
+ }
+ si_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ si_pcie_gart_disable(rdev);
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use helper
+ * functions so that radeon_device_init does pretty much nothing more
+ * than call asic-specific functions. This should also allow us to
+ * remove a bunch of callback functions like vram_info.
+ */
+int si_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ int r;
+
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* init golden registers */
+ si_init_golden_registers(rdev);
+ /* Initialize scratch registers */
+ si_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+
+ /* initialize memory controller */
+ r = si_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 64 * 1024);
+
+ if (rdev->has_uvd) {
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+ }
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = si_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
+ si_irq_fini(rdev);
+ si_rlc_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ si_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+
+ /* Don't start up if the MC ucode is missing.
+ * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+ */
+ if (!rdev->mc_fw) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void si_fini(struct radeon_device *rdev)
+{
+ si_cp_fini(rdev);
+ cayman_dma_fini(rdev);
+ si_irq_fini(rdev);
+ si_rlc_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ radeon_ib_pool_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ if (rdev->has_uvd) {
+ r600_uvd_stop(rdev);
+ radeon_uvd_fini(rdev);
+ }
+ si_pcie_gart_fini(rdev);
+ r600_vram_scratch_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+/**
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set RESET_ANTI_MUX to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
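+
+/*
+ * Usage sketch (illustrative; the exact frequencies are assumptions):
+ * vclk/dclk are given in 10 kHz units, and passing 0/0 leaves the UPLL
+ * in bypass and puts it to sleep:
+ *
+ *	si_set_uvd_clocks(rdev, 53300, 40000);	// e.g. decode clocks
+ *	si_set_uvd_clocks(rdev, 0, 0);		// idle / power down
+ */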
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.c b/drivers/gpu/drm/radeon/si_blit_shaders.c
new file mode 100644
index 0000000..ec415e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_blit_shaders.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+const u32 si_default_state[] =
+{
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0046900,
+ 0x00000008,
+ 0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+ 0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0036900,
+ 0x0000000f,
+ 0x00000000, /* DB_DEPTH_INFO */
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+
+ 0xc0046900,
+ 0x00000100,
+ 0xffffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+ 0xc0046900,
+ 0x00000105,
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc00e6900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+ 0x00000000, /* DB_EQAA */
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000,
+
+ 0xc01b6900,
+ 0x000002f5,
+ 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+ 0xffffffff,
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+const u32 si_default_size = ARRAY_SIZE(si_default_state);
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.h b/drivers/gpu/drm/radeon/si_blit_shaders.h
new file mode 100644
index 0000000..c739e51
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SI_BLIT_SHADERS_H
+#define SI_BLIT_SHADERS_H
+
+extern const u32 si_default_state[];
+
+extern const u32 si_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
new file mode 100644
index 0000000..501f9d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK 0x65b0
+#define SI_DC_GPIO_HPD_A 0x65b4
+#define SI_DC_GPIO_HPD_EN 0x65b8
+#define SI_DC_GPIO_HPD_Y 0x65bc
+
+#define SI_GRPH_CONTROL 0x6804
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
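+
+/*
+ * Usage sketch (illustrative only): a GRPH_CONTROL value is composed by
+ * OR-ing the field macros above, e.g. for a 32bpp ARGB8888 scanout
+ * surface using 1D tiling:
+ *
+ *	u32 ctl = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
+ *		  SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
+ *		  SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_1D_TILED_THIN1);
+ */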
+
+#endif
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
new file mode 100644
index 0000000..495f41f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -0,0 +1,1115 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+/* discrete UVD clocks */
+#define CG_UPLL_FUNC_CNTL 0x634
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_VCO_MODE_MASK 0x00000600
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x638
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x63C
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x644
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_FUNC_CNTL_5 0x648
+# define RESET_ANTI_MUX_MASK 0x00000200
+#define CG_UPLL_SPREAD_SPECTRUM 0x650
+# define SSEN_MASK 0x00000001
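+
+/*
+ * Usage sketch (illustrative only; WREG32_P is the driver's masked
+ * read-modify-write register helper): before reprogramming the UPLL
+ * dividers the PLL is typically put into bypass, e.g.
+ *
+ *	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK,
+ *		 ~UPLL_BYPASS_EN_MASK);
+ */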
+
+#define CG_MULT_THERMAL_STATUS 0x714
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
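+
+/*
+ * Usage sketch (illustrative only; RREG32 is the driver's register read
+ * helper): the current ASIC temperature field is extracted with the
+ * mask/shift pair above, e.g.
+ *
+ *	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ *		   CTF_TEMP_SHIFT;
+ */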
+
+#define SI_MAX_SH_GPRS 256
+#define SI_MAX_TEMP_GPRS 16
+#define SI_MAX_SH_THREADS 256
+#define SI_MAX_SH_STACK_ENTRIES 4096
+#define SI_MAX_FRC_EOV_CNT 16384
+#define SI_MAX_BACKENDS 8
+#define SI_MAX_BACKENDS_MASK 0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
+#define SI_MAX_SIMDS 12
+#define SI_MAX_SIMDS_MASK 0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
+#define SI_MAX_PIPES 8
+#define SI_MAX_PIPES_MASK 0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
+#define SI_MAX_LDS_NUM 0xFFFF
+#define SI_MAX_TCC 16
+#define SI_MAX_TCC_MASK 0xFFFF
+
+#define VGA_HDP_CONTROL 0x328
+#define VGA_MEMORY_DISABLE (1 << 4)
+
+#define CG_CLKPIN_CNTL 0x660
+# define XTALIN_DIVIDE (1 << 1)
+#define CG_CLKPIN_CNTL_2 0x664
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
+#define DMIF_ADDR_CONFIG 0xBD4
+
+#define DMIF_ADDR_CALC 0xC00
+
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
+#define SRBM_STATUS 0xE50
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
+
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
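+
+/*
+ * Usage sketch (illustrative only): soft resets are pulsed - set the
+ * block's bit, read back to post the write, wait, then clear it, e.g.
+ *
+ *	WREG32(SRBM_SOFT_RESET, SOFT_RESET_VMC);
+ *	RREG32(SRBM_SOFT_RESET);
+ *	udelay(50);
+ *	WREG32(SRBM_SOFT_RESET, 0);
+ *	RREG32(SRBM_SOFT_RESET);
+ */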
+
+#define CC_SYS_RB_BACKEND_DISABLE 0xe80
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
+#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
+#define INVALIDATE_PTE_AND_PDE_CACHES 0
+#define INVALIDATE_ONLY_PTE_CACHES 1
+#define INVALIDATE_ONLY_PDE_CACHES 2
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
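+
+/*
+ * Usage sketch (illustrative only): system context 0 is typically
+ * enabled with a flat (depth 0) page table and default-page fault
+ * redirection, e.g.
+ *
+ *	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ *	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ */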
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_CNTL2 0x1430
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x1438
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x143c
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x1440
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x1444
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x1448
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x144c
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
+
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
+
+#define VM_INVALIDATE_REQUEST 0x1478
+#define VM_INVALIDATE_RESPONSE 0x147c
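+
+/*
+ * Usage sketch (illustrative only): VM_INVALIDATE_REQUEST takes a
+ * bitmask of VM contexts whose TLBs should be flushed, e.g. for
+ * context 0 only:
+ *
+ *	WREG32(VM_INVALIDATE_REQUEST, 0x1);
+ */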
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
+
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x1540
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x1544
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x1548
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x154c
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x1550
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x1554
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x1558
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x1560
+
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x0000f000
+#define MC_SHARED_CHREMAP 0x2008
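+
+/*
+ * Usage sketch (illustrative only): the raw channel field is read with
+ * the mask/shift above; note the encoded value is then mapped to an
+ * actual channel count, it is not a plain integer:
+ *
+ *	u32 chan = (RREG32(MC_SHARED_CHMAP) & NOOFCHAN_MASK) >>
+ *		   NOOFCHAN_SHIFT;
+ */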
+
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+
+#define MC_VM_MX_L1_TLB_CNTL 0x2064
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define NOOFGROUPS_SHIFT 12
+#define NOOFGROUPS_MASK 0x00001000
+
+#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
+#define TRAIN_DONE_D0 (1 << 30)
+#define TRAIN_DONE_D1 (1 << 31)
+
+#define MC_SEQ_SUP_CNTL 0x28c8
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0x28cc
+
+#define MC_IO_PAD_CNTL_D0 0x29d0
+#define MEM_FALL_OUT_CMD (1 << 8)
+
+#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
+#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+
+#define HDP_ADDR_CONFIG 0x2F48
+#define HDP_MISC_CNTL 0x2F4C
+#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+# define MC_VMID(x) ((x) << 25)
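+
+/*
+ * Usage sketch (illustrative only; rb_log2 stands in for the log2 of
+ * the ring size in dwords, e.g. from order_base_2()): enabling the IH
+ * ring with overflow handling looks like
+ *
+ *	WREG32(IH_RB_CNTL, IH_RB_ENABLE | IH_WPTR_OVERFLOW_ENABLE |
+ *	       IH_WPTR_OVERFLOW_CLEAR | IH_IB_SIZE(rb_log2));
+ */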
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define BIF_FB_EN 0x5490
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+
+#define DC_LB_MEMORY_SPLIT 0x6b0c
+#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
+
+#define PRIORITY_A_CNT 0x6b18
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x6b1c
+
+#define DPG_PIPE_ARBITRATION_CONTROL3 0x6cc8
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define DPG_PIPE_LATENCY_CONTROL 0x6ccc
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
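+
+/*
+ * Usage sketch (illustrative only; timer values are typical, not
+ * mandated): enabling hotplug detect on pad 1 programs the connection
+ * and RX debounce timers and sets the enable bit:
+ *
+ *	WREG32(DC_HPD1_CONTROL, DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ *	       DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN);
+ */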
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+
+#define GRBM_STATUS2 0x8008
+#define RLC_RQ_PENDING (1 << 0)
+#define RLC_BUSY (1 << 8)
+#define TC_BUSY (1 << 9)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SPI_BUSY (1 << 22)
+#define BCI_BUSY (1 << 23)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_BCI_BUSY (1 << 22)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_RLC (1 << 2)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_BCI (1 << 7)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
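+
+/*
+ * Usage sketch (illustrative only; se/sh stand in for the target
+ * shader engine/array indices): GRBM_GFX_INDEX steers subsequent GRBM
+ * register accesses at one SE/SH, or at all of them via the broadcast
+ * bits:
+ *
+ *	WREG32(GRBM_GFX_INDEX, SE_INDEX(se) | SH_INDEX(sh) |
+ *	       INSTANCE_BROADCAST_WRITES);
+ */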
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define CP_STRMOUT_CNTL 0x84FC
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define CP_SEM_WAIT_TIMER 0x85BC
+
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
+
+#define CP_ME_CNTL 0x86D8
+#define CP_CE_HALT (1 << 24)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_HALT (1 << 28)
+
+#define CP_COHER_CNTL2 0x85E8
+
+#define CP_RB2_RPTR 0x86f8
+#define CP_RB1_RPTR 0x86fc
+#define CP_RB0_RPTR 0x8700
+#define CP_RB_WPTR_DELAY 0x8704
+
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+
+#define CP_PERFMON_CNTL 0x87FC
+
+#define VGT_VTX_VECT_EJECT_REG 0x88B0
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_ESGS_RING_SIZE 0x88C8
+#define VGT_GSVS_RING_SIZE 0x88CC
+
+#define VGT_GS_VERTEX_REUSE 0x88D4
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_INDEX_TYPE 0x895C
+
+#define VGT_NUM_INDICES 0x8970
+#define VGT_NUM_INSTANCES 0x8974
+
+#define VGT_TF_RING_SIZE 0x8988
+
+#define VGT_HS_OFFCHIP_PARAM 0x89B0
+
+#define VGT_TF_MEMORY_BASE 0x89B8
+
+#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
+#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+
+#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
+
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
+
+#define PA_SC_ENHANCE 0x8BF0
+
+#define SQ_CONFIG 0x8C00
+
+#define SQC_CACHES 0x8C08
+
+#define SX_DEBUG_1 0x9060
+
+#define SPI_STATIC_THREAD_MGMT_1 0x90E0
+#define SPI_STATIC_THREAD_MGMT_2 0x90E4
+#define SPI_STATIC_THREAD_MGMT_3 0x90E8
+#define SPI_PS_MAX_WAVE_ID 0x90EC
+
+#define SPI_CONFIG_CNTL 0x9100
+
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_TCC_DISABLE 0x914C
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+
+#define TA_CNTL_AUX 0x9508
+
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+
+#define GB_TILE_MODE0 0x9910
+# define MICRO_TILE_MODE(x) ((x) << 0)
+# define ADDR_SURF_DISPLAY_MICRO_TILING 0
+# define ADDR_SURF_THIN_MICRO_TILING 1
+# define ADDR_SURF_DEPTH_MICRO_TILING 2
+# define ARRAY_MODE(x) ((x) << 2)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+# define PIPE_CONFIG(x) ((x) << 6)
+# define ADDR_SURF_P2 0
+# define ADDR_SURF_P4_8x16 4
+# define ADDR_SURF_P4_16x16 5
+# define ADDR_SURF_P4_16x32 6
+# define ADDR_SURF_P4_32x32 7
+# define ADDR_SURF_P8_16x16_8x16 8
+# define ADDR_SURF_P8_16x32_8x16 9
+# define ADDR_SURF_P8_32x32_8x16 10
+# define ADDR_SURF_P8_16x32_16x16 11
+# define ADDR_SURF_P8_32x32_16x16 12
+# define ADDR_SURF_P8_32x32_16x32 13
+# define ADDR_SURF_P8_32x64_32x32 14
+# define TILE_SPLIT(x) ((x) << 11)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define BANK_WIDTH(x) ((x) << 14)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define BANK_HEIGHT(x) ((x) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
+# define MACRO_TILE_ASPECT(x) ((x) << 18)
+# define ADDR_SURF_MACRO_ASPECT_1 0
+# define ADDR_SURF_MACRO_ASPECT_2 1
+# define ADDR_SURF_MACRO_ASPECT_4 2
+# define ADDR_SURF_MACRO_ASPECT_8 3
+# define NUM_BANKS(x) ((x) << 20)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
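+
+/*
+ * Usage sketch (illustrative only): each GB_TILE_MODEn entry is built
+ * by OR-ing the fields above, e.g. a 2D-tiled display entry might look
+ * like
+ *
+ *	u32 mode = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ *		   ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ *		   PIPE_CONFIG(ADDR_SURF_P8_32x64_32x32) |
+ *		   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ *		   NUM_BANKS(ADDR_SURF_16_BANK) |
+ *		   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ *		   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ *		   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ */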
+
+#define CB_PERFCOUNTER0_SELECT0 0x9a20
+#define CB_PERFCOUNTER0_SELECT1 0x9a24
+#define CB_PERFCOUNTER1_SELECT0 0x9a28
+#define CB_PERFCOUNTER1_SELECT1 0x9a2c
+#define CB_PERFCOUNTER2_SELECT0 0x9a30
+#define CB_PERFCOUNTER2_SELECT1 0x9a34
+#define CB_PERFCOUNTER3_SELECT0 0x9a38
+#define CB_PERFCOUNTER3_SELECT1 0x9a3c
+
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define TCP_CHAN_STEER_LO 0xac0c
+#define TCP_CHAN_STEER_HI 0xac10
+
+#define CP_RB0_BASE 0xC100
+#define CP_RB0_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define BUF_SWAP_32BIT (2 << 16)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+
+#define CP_RB0_RPTR_ADDR 0xC10C
+#define CP_RB0_RPTR_ADDR_HI 0xC110
+#define CP_RB0_WPTR 0xC114
+
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_ME_RAM_DATA 0xC160
+
+#define CP_CE_UCODE_ADDR 0xC168
+#define CP_CE_UCODE_DATA 0xC16C
+
+#define CP_RB1_BASE 0xC180
+#define CP_RB1_CNTL 0xC184
+#define CP_RB1_RPTR_ADDR 0xC188
+#define CP_RB1_RPTR_ADDR_HI 0xC18C
+#define CP_RB1_WPTR 0xC190
+#define CP_RB2_BASE 0xC194
+#define CP_RB2_CNTL 0xC198
+#define CP_RB2_RPTR_ADDR 0xC19C
+#define CP_RB2_RPTR_ADDR_HI 0xC1A0
+#define CP_RB2_WPTR 0xC1A4
+#define CP_INT_CNTL_RING0 0xC1A8
+#define CP_INT_CNTL_RING1 0xC1AC
+#define CP_INT_CNTL_RING2 0xC1B0
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define CP_RINGID2_INT_ENABLE (1 << 29)
+# define CP_RINGID1_INT_ENABLE (1 << 30)
+# define CP_RINGID0_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS_RING0 0xC1B4
+#define CP_INT_STATUS_RING1 0xC1B8
+#define CP_INT_STATUS_RING2 0xC1BC
+# define WAIT_MEM_SEM_INT_STAT (1 << 21)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define CP_RINGID2_INT_STAT (1 << 29)
+# define CP_RINGID1_INT_STAT (1 << 30)
+# define CP_RINGID0_INT_STAT (1 << 31)
+
+#define CP_DEBUG 0xC1FC
+
+#define RLC_CNTL 0xC300
+# define RLC_ENABLE (1 << 0)
+#define RLC_RL_BASE 0xC304
+#define RLC_RL_SIZE 0xC308
+#define RLC_LB_CNTL 0xC30C
+#define RLC_SAVE_AND_RESTORE_BASE 0xC310
+#define RLC_LB_CNTR_MAX 0xC314
+#define RLC_LB_CNTR_INIT 0xC318
+
+#define RLC_CLEAR_STATE_RESTORE_BASE 0xC320
+
+#define RLC_UCODE_ADDR 0xC32C
+#define RLC_UCODE_DATA 0xC330
+
+#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
+#define RLC_MC_CNTL 0xC344
+#define RLC_UCODE_CNTL 0xC348
+
+#define PA_SC_RASTER_CONFIG 0x28350
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+
+#define VGT_EVENT_INITIATOR 0x28a90
+# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
+# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
+# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
+# define CACHE_FLUSH_TS (4 << 0)
+# define CACHE_FLUSH (6 << 0)
+# define CS_PARTIAL_FLUSH (7 << 0)
+# define VGT_STREAMOUT_RESET (10 << 0)
+# define END_OF_PIPE_INCR_DE (11 << 0)
+# define END_OF_PIPE_IB_END (12 << 0)
+# define RST_PIX_CNT (13 << 0)
+# define VS_PARTIAL_FLUSH (15 << 0)
+# define PS_PARTIAL_FLUSH (16 << 0)
+# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
+# define ZPASS_DONE (21 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
+# define PERFCOUNTER_START (23 << 0)
+# define PERFCOUNTER_STOP (24 << 0)
+# define PIPELINESTAT_START (25 << 0)
+# define PIPELINESTAT_STOP (26 << 0)
+# define PERFCOUNTER_SAMPLE (27 << 0)
+# define SAMPLE_PIPELINESTAT (30 << 0)
+# define SAMPLE_STREAMOUTSTATS (32 << 0)
+# define RESET_VTX_CNT (33 << 0)
+# define VGT_FLUSH (36 << 0)
+# define BOTTOM_OF_PIPE_TS (40 << 0)
+# define DB_CACHE_FLUSH_AND_INV (42 << 0)
+# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
+# define FLUSH_AND_INV_DB_META (44 << 0)
+# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
+# define FLUSH_AND_INV_CB_META (46 << 0)
+# define CS_DONE (47 << 0)
+# define PS_DONE (48 << 0)
+# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
+# define THREAD_TRACE_START (51 << 0)
+# define THREAD_TRACE_STOP (52 << 0)
+# define THREAD_TRACE_FLUSH (54 << 0)
+# define THREAD_TRACE_FINISH (55 << 0)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
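+
+/*
+ * Usage sketch (illustrative only; reg/val stand in for the target
+ * register and value): PACKET3(op, n) builds a type-3 header where n
+ * is the number of payload dwords minus one; the payload follows on
+ * the ring, e.g. writing one config register:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ *	radeon_ring_write(ring, (reg - PACKET3_SET_CONFIG_REG_START) >> 2);
+ *	radeon_ring_write(ring, val);
+ */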
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_BASE_INDEX(x) ((x) << 0)
+#define GDS_PARTITION_BASE 2
+#define CE_PARTITION_BASE 3
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_ALLOC_GDS 0x1B
+#define PACKET3_WRITE_GDS_RAM 0x1C
+#define PACKET3_ATOMIC_GDS 0x1D
+#define PACKET3_ATOMIC 0x1E
+#define PACKET3_OCCLUSION_QUERY 0x1F
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER_CONST 0x31
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+ /* 0 - register
+ * 1 - memory (sync - via GRBM)
+ * 2 - tc/l2
+ * 3 - gds
+ * 4 - reserved
+ * 5 - memory (async - direct)
+ */
+#define WR_ONE_ADDR (1 << 16)
+#define WR_CONFIRM (1 << 20)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+ /* 0 - me
+ * 1 - pfp
+ * 2 - ce
+ */
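+
+/*
+ * Usage sketch (illustrative only; reg/val stand in for the target
+ * register and value): a WRITE_DATA packet carrying one dword to a
+ * register (dst_sel 0, me engine) looks like
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ *	radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |
+ *			  WRITE_DATA_DST_SEL(0));
+ *	radeon_ring_write(ring, reg >> 2);
+ *	radeon_ring_write(ring, 0);
+ *	radeon_ring_write(ring, val);
+ */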
+#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
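+
+/*
+ * Usage sketch (illustrative only; byte_count stands in for the
+ * transfer size): the COMMAND dword ORs the byte count (bits 20:0)
+ * with the flags above, e.g. a copy that waits on the semaphore and
+ * skips the write confirm:
+ *
+ *	u32 command = byte_count | PACKET3_CP_DMA_DIS_WC |
+ *		      PACKET3_CP_DMA_CMD_RAW_WAIT;
+ */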
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_DEST_BASE_0_ENA (1 << 0)
+# define PACKET3_DEST_BASE_1_ENA (1 << 1)
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_DEST_BASE_2_ENA (1 << 19)
+# define PACKET3_DEST_BASE_3_ENA (1 << 21)
+# define PACKET3_TCL1_ACTION_ENA (1 << 22)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - EOP events
+ * 6 - EOS events
+ * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+ */
+#define INV_L2 (1 << 20)
+ /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
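+
+/*
+ * Usage sketch (illustrative only; addr/seq stand in for the fence GPU
+ * address and sequence value): a GFX fence can be emitted as an
+ * EVENT_WRITE_EOP packet - a cache-flush timestamp event that writes
+ * the 32-bit fence value (DATA_SEL(1)) and interrupts once the write
+ * is confirmed (INT_SEL(2)):
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ *	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ *			  EVENT_INDEX(5));
+ *	radeon_ring_write(ring, addr & 0xffffffff);
+ *	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) |
+ *			  DATA_SEL(1) | INT_SEL(2));
+ *	radeon_ring_write(ring, seq);
+ *	radeon_ring_write(ring, 0);
+ */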
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_LOAD_CONFIG_REG 0x5F
+#define PACKET3_LOAD_CONTEXT_REG 0x60
+#define PACKET3_LOAD_SH_REG 0x61
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000b000
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_START 0x0000b000
+#define PACKET3_SET_SH_REG_END 0x0000c000
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_ME_WRITE 0x7A
+#define PACKET3_SCRATCH_RAM_WRITE 0x7D
+#define PACKET3_SCRATCH_RAM_READ 0x7E
+#define PACKET3_CE_WRITE 0x7F
+#define PACKET3_LOAD_CONST_RAM 0x80
+#define PACKET3_WRITE_CONST_RAM 0x81
+#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
+#define PACKET3_DUMP_CONST_RAM 0x83
+#define PACKET3_INCREMENT_CE_COUNTER 0x84
+#define PACKET3_INCREMENT_DE_COUNTER 0x85
+#define PACKET3_WAIT_ON_CE_COUNTER 0x86
+#define PACKET3_WAIT_ON_DE_COUNTER 0x87
+#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
+#define PACKET3_SET_CE_DE_COUNTERS 0x89
+#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
+
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0xd004
+#define DMA_RB_RPTR 0xd008
+#define DMA_RB_WPTR 0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI 0xd01c
+#define DMA_RB_RPTR_ADDR_LO 0xd020
+
+#define DMA_IB_CNTL 0xd024
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+#define DMA_IB_RPTR 0xd028
+#define DMA_CNTL 0xd02c
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+#define DMA_TILING_CONFIG 0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
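+
+/*
+ * Usage sketch (illustrative only; addr/seq stand in for the fence GPU
+ * address and sequence value): a fence on the async DMA ring is a
+ * DMA_PACKET_FENCE followed by the target address and fence value:
+ *
+ *	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ *	radeon_ring_write(ring, addr & 0xfffffffc);
+ *	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ *	radeon_ring_write(ring, seq);
+ */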
+
+#endif